source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_binop__plus_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fp64)
// A*D function (colscale): GB (_AxD__plus_fp64)
// D*A function (rowscale): GB (_DxB__plus_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fp64)
// C=scalar+B GB (_bind1st__plus_fp64)
// C=scalar+B' GB (_bind1st_tran__plus_fp64)
// C=A+scalar GB (_bind2nd__plus_fp64)
// C=A'+scalar GB (_bind2nd_tran__plus_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_FP64 || GxB_NO_PLUS_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; "+" is the built-in fp64 PLUS op.
// The loop nest is supplied entirely by the included template (auto-generated
// kernel -- see the "do not edit" note at the top of this file).
void GB (_Cdense_ewise3_accum__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulation into C); the loop
// body comes from the included template.
void GB (_Cdense_ewise3_noaccum__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// subassign "method 23" template. B_ek_slicing describes how B's entries are
// partitioned across B_ntasks tasks / B_nthreads threads.
GrB_Info GB (_Cdense_accumB__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// GB_DISABLE is set when this operator/type pair is compiled out via the
// GxB_NO_* controls in GB_control.h; the generic kernel is used instead.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__plus_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the inner block above always returns first (artifact of the
// code generator; kept byte-for-byte since this file is auto-generated)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// PLUS fp64 operator entry-wise via the colscale template.
GrB_Info GB (_AxD__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// PLUS fp64 operator entry-wise via the rowscale template.
GrB_Info GB (_DxB__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, over the union of the
// patterns of A and B. When is_eWiseUnion is true, alpha/beta scalars stand
// in for missing entries of A and B respectively (GxB_eWiseUnion semantics).
GrB_Info GB (_AaddB__plus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slicings; freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
// unpack the typed alpha/beta "fill" scalars for eWiseUnion
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) over the intersection of
// the patterns of A and B, where C is sparse or hypersparse.
GrB_Info GB (_AemultB_08__plus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full. For PLUS, GB_BINOP_FLIP is 0 (commutative), so only the
// unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; C takes M's pattern (Cp_kfirst maps M's vectors into C).
GrB_Info GB (_AemultB_04__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held in bitmap form (any mask variant).
GrB_Info GB (_AemultB_bitmap__plus_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x + Bx [p] for every entry present in B. Bb is B's bitmap
// (GBB treats a NULL bitmap as "all present", i.e. B is full); GBX reads
// Bx [p], honoring the iso flag.
GrB_Info GB (_bind1st__plus_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] + y for every entry present in A (mirror image of bind1st,
// with the scalar bound to the second operand).
GrB_Info GB (_bind2nd__plus_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A while applying cij = x + aij (GB_CAST_OP,
// defined just above, is invoked by the transpose template).
GrB_Info GB (_bind1st_tran__plus_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the code that follows (preprocessor only; this sits
// after the returns but still takes effect at compile time)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A while applying cij = aij + y (GB_CAST_OP,
// redefined just above, is invoked by the transpose template).
GrB_Info GB (_bind2nd_tran__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dictionary-omp.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <omp.h>
#include "dictionary-util.c"
#include "../globals.h"
int compare_candidates(FILE **file, char *password_hash, int verbose);
int run_chunk(char *password_hash, char **candidate_array, int chunk_size, int verbose);
/**
* dictionary_crack() - OpenMP Implementation
*
* The OpenMP implementation of the dictionary attack.
*
* @param password_hash is the hashed password string to crack.
* @param dictionary_path is the full path, including the dictionary filename.
* @param verbose is a flag for verbose mode. Set to 1 to enable.
* @return the result as an integer value, FOUND (0) or NOT_FOUND (1).
*/
/**
 * dictionary_crack() - OpenMP Implementation
 *
 * The OpenMP implementation of the dictionary attack.
 *
 * @param password_hash is the hashed password string to crack.
 * @param dictionary_path is the full path, including the dictionary filename.
 * @param verbose is a flag for verbose mode. Set to 1 to enable.
 * @return the result as an integer value, FOUND (0) or NOT_FOUND (1).
 */
int dictionary_crack(char *password_hash, char *dictionary_path, int verbose)
{
    // Print input parameters
    if( verbose )
    {
        printf("\n>>> Using dictionary path: %s\n", dictionary_path);
        print_password_hash(password_hash);
    }

    // Open file. BUG FIX: the original never checked fopen()'s return value,
    // so a bad path crashed inside getline() with a NULL stream.
    FILE *file = fopen(dictionary_path, "r");
    if (file == NULL)
    {
        fprintf(stderr, "Unable to open dictionary file: %s\n", dictionary_path);
        return NOT_FOUND;
    }

    // Do calculation
    int result = compare_candidates(&file, password_hash, verbose);
    if(result == NOT_FOUND)
        print_not_found(verbose);

    // Cleanup
    fclose(file);
    return result;
}
/**
* compare_candidates() - comparing password_hash against hashed dictionary entries
* (OpenMP Implementation)
*
* 1. Manages iterating through the dictionary file and initiating the hash comparisons.
* 2. Returns the result value (FOUND or NOT_FOUND) and the plain text password, if found.
*
* @param file is a pointer to the dictionary file in memory.
* @param password_hash is the hashed password string to crack.
* @param verbose is a flag for verbose mode. Set to 1 to enable.
* @return the result as an integer value, FOUND (0) or NOT_FOUND (1).
*/
/**
 * compare_candidates() - compares password_hash against hashed dictionary
 * entries (OpenMP Implementation).
 *
 * Reads the dictionary line by line, batching candidates into fixed-size
 * chunks that run_chunk() hashes in parallel.
 *
 * @param file is a pointer to the open dictionary file stream.
 * @param password_hash is the hashed password string to crack.
 * @param verbose is a flag for verbose mode. Set to 1 to enable.
 * @return the result as an integer value, FOUND (0) or NOT_FOUND (1).
 */
int compare_candidates(FILE **file, char *password_hash, int verbose)
{
    char *line = NULL;
    size_t len = 0;
    ssize_t read;
    int result = NOT_FOUND;
    int counter = 0;
    char * candidate_array[CHUNK_SIZE];

    // BUG FIX: the assignment is parenthesized -- the original
    // `read = getline(...) != -1` bound `!=` first and stored 0/1 in `read`
    // instead of the byte count.
    while (((read = getline(&line, &len, *file)) != -1) && result == NOT_FOUND)
    {
        remove_new_line(line, &candidate_array[counter]);

        // BUG FIX: dispatch the chunk when the array is exactly full. The
        // original post-increment test (`counter++ == CHUNK_SIZE`) allowed
        // counter to reach CHUNK_SIZE before the check, writing one element
        // past the end of candidate_array and dropping that candidate.
        if (++counter == CHUNK_SIZE)
        {
            result = run_chunk(password_hash, candidate_array, CHUNK_SIZE, verbose);
            counter = 0;
        }
    }

    // finish off remaining work that didn't fit in a chunk.
    if (counter > 0)
    {
        result = run_chunk(password_hash, candidate_array, counter, verbose);
    }

    // getline() allocates/reallocates this buffer; release it exactly once.
    free(line);
    return result;
}
/**
* run_chunk() - does hash comparison on a chunk of passwords read from disk
* (OpenMP Implementation)
*
* 1. Hashes each candidate password in the chunk and compares it to the target hash.
* 2. Returns the result value (FOUND or NOT_FOUND) and the plain text password, if found.
*
* @param password_hash is the hashed password string to crack.
* @param candidate_array is the array of candidate passwords.
* @param array_size is the size of the candidate array.
* @param verbose is a flag for verbose mode. Set to 1 to enable.
* @return the result as an integer value, FOUND (0) or NOT_FOUND (1).
*/
/**
 * run_chunk() - hashes and compares a chunk of candidate passwords in
 * parallel (OpenMP Implementation).
 *
 * @param password_hash is the hashed password string to crack.
 * @param candidate_array is the array of candidate passwords.
 * @param array_size is the number of candidates in the array.
 * @param verbose is a flag for verbose mode. Set to 1 to enable.
 * @return the result as an integer value, FOUND (0) or NOT_FOUND (1).
 */
int run_chunk(char *password_hash, char **candidate_array, int array_size, int verbose)
{
    int result = NOT_FOUND;
    int idx;

    // Combined parallel + worksharing construct; each thread hashes a share
    // of the chunk. Writes to the shared result are serialized.
    #pragma omp parallel for schedule(auto)
    for (idx = 0; idx < array_size; idx++)
    {
        if (do_comparison(password_hash, candidate_array[idx], verbose) == FOUND)
        {
            #pragma omp critical
            result = FOUND;
        }
    }
    return result;
}
|
DRACC_OMP_027_MxV_Partially_Missing_Exit_Data_yes.c | /*
Matrix Vector multiplication without completely copying back the result c, while utilising the enter data construct.
*/
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#define C 512
int *a;
int *b;
int *c;
// Fill the operands: vector a and matrix b with ones, result vector c with
// zeros, so every correct c[i] after the multiply equals C.
int init(){
    for(int row = 0; row < C; row++){
        a[row] = 1;
        c[row] = 0;
        for(int col = 0; col < C; col++){
            b[col + row*C] = 1;
        }
    }
    return 0;
}
// Offloaded matrix-vector multiply: c = b * a on device 0.
// NOTE(review): this kernel is a deliberate "defect present" benchmark case
// (see the file name: ..._Partially_Missing_Exit_Data_yes). The exit-data
// directive below maps back only c[0:C/2], so the host copy of the second
// half of c stays stale and check() reports the issue. Do NOT "fix" the map
// clause -- the missing half IS the behavior under test.
int Mult(){
#pragma omp target enter data map(to:a[0:C],b[0:C*C],c[0:C]) device(0)
#pragma omp target device(0)
{
#pragma omp teams distribute parallel for
for(int i=0; i<C; i++){
for(int j=0; j<C; j++){
c[i]+=b[j+i*C]*a[j];
}
}
}
// intentionally transfers back only the first half of c (see note above)
#pragma omp target exit data map(from:c[0:C/2]) map(release:a[0:C],b[0:C*C]) device(0)
return 0;
}
// Verify the host copy of c: every element should equal C after a correct
// multiply and full copy-back; report whether any element disagrees.
int check(){
    bool issue_found = false;
    for(int idx = 0; idx < C; idx++){
        if(c[idx] != C){
            issue_found = true;
        }
    }
    printf("Memory Access Issue visible: %s\n", issue_found ? "true" : "false");
    return 0;
}
// Allocate the global operands, run the offloaded multiply, and check the
// host result. BUG FIX: the original never checked the malloc results, so an
// allocation failure crashed inside init() instead of failing cleanly.
int main(){
    a = malloc(C*sizeof(int));
    b = malloc((size_t)C*C*sizeof(int));
    c = malloc(C*sizeof(int));
    if(a == NULL || b == NULL || c == NULL){
        fprintf(stderr, "allocation failed\n");
        free(a);
        free(b);
        free(c);
        return EXIT_FAILURE;
    }
    init();
    Mult();
    check();
    free(a);
    free(b);
    free(c);
    return 0;
}
cpu_ctc.h | #pragma once
#include <tuple>
#include <cmath>
#include <limits>
#include <algorithm>
#include <numeric>
#if !defined(CTC_DISABLE_OMP) && !defined(APPLE)
#include <omp.h>
#endif
#include "ctc_helper.h"
// CPU implementation of the CTC (Connectionist Temporal Classification) loss
// and gradient. All scratch memory is carved out of the caller-provided
// `workspace` buffer; this class allocates nothing itself.
template<typename ProbT>
class CpuCTC {
public:
// Noncopyable
CpuCTC(int alphabet_size, int minibatch, void* workspace, int num_threads) :
alphabet_size_(alphabet_size), minibatch_(minibatch),
num_threads_(num_threads), workspace_(workspace) {
#if defined(CTC_DISABLE_OMP) || defined(APPLE)
#else
// num_threads <= 0 means "use all available OpenMP threads"
if (num_threads > 0) {
omp_set_num_threads(num_threads);
} else {
num_threads_ = omp_get_max_threads();
}
#endif
};
CpuCTC(const CpuCTC&) = delete;
CpuCTC& operator=(const CpuCTC&) = delete;
// Compute per-utterance negative log-likelihoods (costs) and gradients
// w.r.t. the unnormalized activations.
ctcStatus_t cost_and_grad(const ProbT* const activations,
ProbT *grads,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
// Forward-only scoring: costs without gradients.
ctcStatus_t score_forward(const ProbT* const activations,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
private:
// Per-utterance views into the shared workspace (alphas/betas/label
// bookkeeping), set up by the constructor below.
class CpuCTC_metadata {
private:
int setup_labels(const int* const labels, int L, int S);
public:
CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size,
void* workspace, size_t bytes_used,
const int* const labels);
ProbT* alphas;
ProbT* betas;
int* labels_w_blanks;   // label sequence with blanks interleaved, length S
int* e_inc;             // per-step end-of-window increments
int* s_inc;             // per-step start-of-window increments
ProbT* output;          // per-label accumulator, length alphabet_size
int repeats;            // number of adjacent repeated labels
};
int alphabet_size_; // Number of characters plus blank
int minibatch_;
int num_threads_;
void* workspace_;   // caller-owned scratch buffer (probs + per-mb arrays)
void softmax(const ProbT* const activations, ProbT* probs,
const int* const input_lengths);
std::tuple<ProbT, bool>
cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
const int* const labels, int T, int L,
int mb, size_t bytes_used);
ProbT compute_alphas(const ProbT* probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas);
ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output);
};
// Carve this utterance's scratch arrays out of the shared workspace, starting
// at offset bytes_used (bytes_used is a local copy -- advancing it here does
// not affect the caller's offset bookkeeping).
template<typename ProbT>
CpuCTC<ProbT>::CpuCTC_metadata::CpuCTC_metadata(int L, int S, int T, int mb,
int alphabet_size,
void* workspace, size_t bytes_used,
const int* const labels) {
// alphas: full S x T trellis, initialized to log(0)
alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S * T;
std::fill(alphas, alphas + S * T, ctc_helper::neg_inf<ProbT>());
// betas: a single column of length S, reused while sweeping backward
betas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S;
std::fill(betas, betas + S, ctc_helper::neg_inf<ProbT>());
labels_w_blanks = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
e_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
s_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
output = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * alphabet_size;
repeats = setup_labels(labels, L, S);
}
// Build the blank-interleaved label sequence and the start/end window
// increment tables used by the alpha/beta sweeps. A repeated adjacent label
// forces the path through the separating blank, so the window advances by two
// single steps (1,1) instead of one double step (2). Returns the number of
// adjacent repeats (paths need at least L + repeats frames to be feasible).
template<typename ProbT>
int CpuCTC<ProbT>::CpuCTC_metadata::setup_labels(const int* const labels,
int L, int S) {
int e_counter = 0;
int s_counter = 0;
s_inc[s_counter++] = 1;
int repeats = 0;
for (int i = 1; i < L; ++i) {
if (labels[i-1] == labels[i]) {
s_inc[s_counter++] = 1;
s_inc[s_counter++] = 1;
e_inc[e_counter++] = 1;
e_inc[e_counter++] = 1;
++repeats;
}
else {
s_inc[s_counter++] = 2;
e_inc[e_counter++] = 2;
}
}
e_inc[e_counter++] = 1;
// interleave: blank, label[0], blank, label[1], ..., blank (length S = 2L+1)
for (int i = 0; i < L; ++i) {
labels_w_blanks[2 * i] = ctc_helper::BLANK;
labels_w_blanks[2 * i + 1] = labels[i];
}
labels_w_blanks[S - 1] = ctc_helper::BLANK;
return repeats;
}
// Column-wise numerically-stable softmax: for each minibatch element and each
// valid time step, normalize the alphabet_size_ activations into
// probabilities (subtracting the column max before exponentiating).
template<typename ProbT>
void
CpuCTC<ProbT>::softmax(const ProbT* const activations, ProbT* probs,
                       const int* const input_lengths) {
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        for (int t = 0; t < input_lengths[mb]; ++t) {
            const int col = (mb + minibatch_ * t) * alphabet_size_;
            const ProbT* const col_in = activations + col;
            ProbT* const col_out = probs + col;

            // column maximum, for numerical stability
            ProbT peak = -std::numeric_limits<ProbT>::infinity();
            for (int r = 0; r < alphabet_size_; ++r)
                peak = std::max(peak, col_in[r]);

            // exponentiate and accumulate the normalizer
            ProbT total = ProbT(0.);
            for (int r = 0; r < alphabet_size_; ++r) {
                col_out[r] = std::exp(col_in[r] - peak);
                total += col_out[r];
            }

            // normalize in place
            for (int r = 0; r < alphabet_size_; ++r)
                col_out[r] /= total;
        }
    }
}
// Per-utterance CTC cost and gradient: runs the forward (alpha) and backward
// (beta + gradient) passes and returns (negative log-likelihood, whether the
// forward/backward log-likelihoods disagreed beyond the sanity threshold).
template<typename ProbT>
std::tuple<ProbT, bool>
CpuCTC<ProbT>::cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
const int* const labels,
int T, int L, int mb, size_t bytes_used) {
const int S = 2*L + 1; // Number of labels with blanks
CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used, labels);
bool over_threshold = false;
// infeasible: not enough frames to emit L labels plus forced blanks between
// repeats
if (L + ctcm.repeats > T) {
return std::make_tuple(ProbT(0), over_threshold); // TODO, not right to return 0
}
ProbT llForward = compute_alphas(probs, ctcm.repeats, S, T, ctcm.e_inc,
ctcm.s_inc, ctcm.labels_w_blanks,
ctcm.alphas);
ProbT llBackward = compute_betas_and_grad(grad, probs, llForward, ctcm.repeats,
S, T, ctcm.e_inc, ctcm.s_inc,
ctcm.labels_w_blanks,
ctcm.alphas,
ctcm.betas,
ctcm.output);
// forward and backward log-likelihoods should agree up to rounding
ProbT diff = std::abs(llForward - llBackward);
if (diff > ctc_helper::threshold) {
over_threshold = true;
}
return std::make_tuple(-llForward, over_threshold);
}
// Computes forward probabilities
// Forward pass of CTC in log space: fills the S x T alpha trellis and returns
// the total log-likelihood. [start, end) is the window of label positions
// reachable at each time step; it is advanced using the precomputed
// s_inc/e_inc tables so unreachable cells are never touched.
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_alphas(const ProbT* probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas) {
// at t = 0 only the first blank (and first label, if S > 1) are reachable
int start = (((S /2) + repeats - T) < 0) ? 0 : 1,
end = S > 1 ? 2 : 1;
for (int i = start; i < end; ++i) {
alphas[i] = std::log(probs[labels[i]]);
}
for(int t = 1; t < T; ++t) {
// shrink/grow the reachable window for this time step
int remain = (S / 2) + repeats - (T - t);
if(remain >= 0)
start += s_inc[remain];
if(t <= (S / 2) + repeats)
end += e_inc[t - 1];
int startloop = start;
// idx1/idx2: current/previous trellis columns; idx3: probs column at t
int idx1 = t * S, idx2 = (t - 1) * S, idx3 = t * (alphabet_size_ * minibatch_);
if (start == 0) {
// leading blank can only be entered from itself
alphas[idx1] = alphas[idx2] + std::log(probs[ctc_helper::BLANK + idx3]);
startloop += 1;
}
for(int i = startloop; i < end; ++i) {
ProbT prev_sum = ctc_helper::log_plus<ProbT>()(alphas[i + idx2], alphas[(i-1) + idx2]);
// Skip two if not on blank and not on repeat.
if (labels[i] != ctc_helper::BLANK && i != 1 && labels[i] != labels[i-2])
prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum, alphas[(i-2) + idx2]);
alphas[i + idx1] = prev_sum + std::log(probs[labels[i] + idx3]);
}
}
// total likelihood: log-sum over the final reachable positions at t = T-1
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, alphas[i + (T - 1) * S]);
}
return loglike;
}
// Starting from T, we sweep backward over the alpha array computing one column
// of betas as we go. At each position we can update product alpha * beta and then
// sum into the gradient associated with each label.
// NOTE computes gradient w.r.t UNNORMALIZED final layer activations.
// Assumed passed in grads are already zeroed!
// Backward pass of CTC: sweeps right-to-left computing one beta column at a
// time (betas holds only the current column), folds alpha*beta into the
// per-label `output` accumulator, and writes the gradient of the loss w.r.t.
// the unnormalized activations. Returns the backward log-likelihood, which
// should match the forward one up to rounding.
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output) {
int start = S > 1 ? (S - 2) : 0,
end = (T > (S / 2) + repeats) ? S : S-1;
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
//set the starting values in the beta column at the very right edge
for (int i = start; i < end; ++i) {
betas[i] = std::log(probs[labels[i] + (T - 1) * (alphabet_size_ * minibatch_)]);
//compute alpha * beta in log space at this position in (S, T) space
alphas[i + (T - 1) * S] += betas[i];
//update the gradient associated with this label
//essentially performing a reduce-by-key in a sequential manner
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + (T - 1) * S], output[labels[i]]);
}
//update the gradient wrt to each unique label
for (int i = 0; i < alphabet_size_; ++i) {
int idx3 = (T - 1) * alphabet_size_ * minibatch_ + i;
// guard against log(0)/0-prob columns: fall back to the raw probability
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
probs[idx3] == 0.0) {
grad[idx3] = probs[idx3];
} else {
grad[idx3] = probs[idx3] - std::exp(output[i] -
std::log(probs[idx3]) - log_partition);
}
}
//loop from the second to last column all the way to the left
for(int t = T - 2; t >= 0; --t) {
// shrink the reachable [start, end) window mirroring the forward pass
int remain = (S / 2) + repeats - (T - t);
if(remain >= -1)
start -= s_inc[remain + 1];
if(t < (S / 2) + repeats)
end -= e_inc[t];
int endloop = end == S ? end - 1 : end;
int idx1 = t * S, idx3 = t * (alphabet_size_ * minibatch_);
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
for(int i = start; i < endloop; ++i) {
ProbT next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]);
// Skip two if not on blank and not on repeat.
if (labels[i] != ctc_helper::BLANK && i != (S-2) && labels[i] != labels[i+2]){
next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]);
}
// betas is updated in place; betas[i+1], betas[i+2] still hold column t+1
// values because i runs left-to-right
betas[i] = next_sum + std::log(probs[labels[i] + idx3]);
//compute alpha * beta in log space
alphas[i + idx1] += betas[i];
//update the gradient associated with this label
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + idx1], output[labels[i]]);
}
if (end == S) {
// trailing blank can only be reached from itself
betas[(S-1)] = betas[(S-1)] + std::log(probs[ctc_helper::BLANK + idx3]);
alphas[(S-1) + idx1] += betas[(S-1)];
output[labels[S-1]] =
ctc_helper::log_plus<ProbT>()(alphas[S-1 + idx1], output[labels[S-1]]);
}
//go over the unique labels and compute the final grad
// wrt to each one at this time step
for (int i = 0; i < alphabet_size_; ++i) {
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
probs[idx3] == 0.0) {
grad[idx3] = probs[idx3];
} else {
grad[idx3] = probs[idx3] - std::exp(output[i] -
std::log(probs[idx3]) - log_partition);
}
++idx3;
}
}
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, betas[i]);
}
return loglike;
}
// Public entry: compute costs and activation gradients for the whole
// minibatch. The workspace is laid out as [softmax probs | per-utterance
// scratch], with each utterance processed by one OpenMP task.
template<typename ProbT>
ctcStatus_t
CpuCTC<ProbT>::cost_and_grad(const ProbT* const activations,
ProbT *grads,
ProbT *costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths) {
if (activations == nullptr ||
grads == nullptr ||
costs == nullptr ||
flat_labels == nullptr ||
label_lengths == nullptr ||
input_lengths == nullptr
)
return CTC_STATUS_INVALID_VALUE;
// the head of the workspace holds the softmax probabilities
ProbT* probs = static_cast<ProbT *>(workspace_);
int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
//per minibatch memory
size_t per_minibatch_bytes = 0;
int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);;
int maxS = 2 * maxL + 1;
// NOTE(review): the sizes below use sizeof(float) even though the buffers
// hold ProbT; if ProbT were double the offsets would understate the real
// usage. Confirm against the workspace-size computation in the ctc
// entry point before changing -- the two must stay in sync.
//output
per_minibatch_bytes += sizeof(float) * alphabet_size_;
//alphas
per_minibatch_bytes += sizeof(float) * maxS * maxT;
//betas
per_minibatch_bytes += sizeof(float) * maxS;
//labels w/blanks, e_inc, s_inc
per_minibatch_bytes += 3 * sizeof(int) * maxS;
softmax(activations, probs, input_lengths);
#pragma omp parallel for
for (int mb = 0; mb < minibatch_; ++mb) {
const int T = input_lengths[mb]; // Length of utterance (time)
const int L = label_lengths[mb]; // Number of labels in transcription
bool mb_status;
// flat_labels is packed back-to-back; the accumulate computes this
// utterance's offset into it
std::tie(costs[mb], mb_status) =
cost_and_grad_kernel(grads + mb * alphabet_size_,
probs + mb * alphabet_size_,
flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0),
T, L, mb,
bytes_used + mb * per_minibatch_bytes);
}
return CTC_STATUS_SUCCESS;
}
// Forward-only scoring: like cost_and_grad but runs just the alpha pass, so
// only the costs are produced (no gradients).
template<typename ProbT>
ctcStatus_t CpuCTC<ProbT>::score_forward(const ProbT* const activations,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths) {
if (activations == nullptr ||
costs == nullptr ||
flat_labels == nullptr ||
label_lengths == nullptr ||
input_lengths == nullptr
)
return CTC_STATUS_INVALID_VALUE;
ProbT* probs = static_cast<ProbT *>(workspace_);
int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
//per minibatch memory
size_t per_minibatch_bytes = 0;
int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
int maxS = 2 * maxL + 1;
// NOTE(review): sizeof(float) vs ProbT -- see the matching note in
// cost_and_grad; must stay in sync with the workspace-size computation.
//output
per_minibatch_bytes += sizeof(float) * alphabet_size_;
//alphas
per_minibatch_bytes += sizeof(float) * maxS * maxT;
//betas
per_minibatch_bytes += sizeof(float) * maxS;
//labels w/blanks, e_inc, s_inc
per_minibatch_bytes += 3 * sizeof(int) * maxS;
softmax(activations, probs, input_lengths);
#pragma omp parallel for
for (int mb = 0; mb < minibatch_; ++mb) {
const int T = input_lengths[mb]; // Length of utterance (time)
const int L = label_lengths[mb]; // Number of labels in transcription
const int S = 2*L + 1; // Number of labels with blanks
CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_,
bytes_used + mb * per_minibatch_bytes,
flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0));
// infeasible label/frame combination scores zero (matches the kernel's
// TODO'd behavior in cost_and_grad_kernel)
if (L + ctcm.repeats > T)
costs[mb] = ProbT(0);
else {
costs[mb] = -compute_alphas(probs + mb * alphabet_size_, ctcm.repeats, S, T,
ctcm.e_inc, ctcm.s_inc, ctcm.labels_w_blanks,
ctcm.alphas);
}
}
return CTC_STATUS_SUCCESS;
}
|
collatzGuided.c | // test file to execute the collatz conjecture on 1 proc
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
typedef unsigned long long ullong;
ullong hotpo(ullong currn);
// Driver: for each seed 1..nmax, iterate the Collatz map (capped at imax
// steps) and report the highest value reached by any trajectory.
int main(int argc, char** argv) {
    ullong high,                               // highest value recorded
           nmax = (argc > 1) ? strtoull(argv[1], NULL, 10) : 50,  // strtoull, not atoi: seeds can exceed int
           imax = 2000000;                     // max number of iterations per seed

    #pragma omp parallel
    {
        printf("worker %d/%d ready to roll\n", omp_get_thread_num(), omp_get_num_threads());
    }

    high = 0;

    /* timers */
    double startTime = omp_get_wtime(),
           endTime;

    // BUG FIX: the trajectory variable `n` was declared at function scope and
    // therefore shared by all threads -- a data race that could mix up
    // trajectories. Declaring it inside the loop body makes it private to
    // each iteration.
    #pragma omp parallel for schedule(guided, 50) reduction(max:high)
    for (ullong j = 1; j <= nmax; ++j) {
        ullong n = j;
        for (ullong i = 1; i <= imax; ++i) {
            n = hotpo(n);
            if (n > high) high = n;
            if (n == 1) break;  // stop if we reach 1
        }
    }

    // %llu, not %lld: high is unsigned long long
    printf("\nHigh: %llu\n", high);
    endTime = omp_get_wtime();
    printf("\nruntime = %.16e\n", endTime - startTime);
    return 0;
}
// hotpo ("half or triple plus one"): one Collatz step.
// Returns currn/2 when currn is even, 3*currn + 1 when odd.
// NOTE(review): 3*currn + 1 can wrap for currn near ULLONG_MAX/3 — unchecked.
ullong hotpo(ullong currn) {
return (
(currn % 2 == 0)? currn/2 : 3*currn + 1
);
} |
lsm3d_blsm_openmp_v1.c | #include "openst/eikonal/lsm.h"
/* Implementation name exported so callers can identify this LSM kernel at runtime. */
#define M_LSM3D_IMP_NAME "BLSMv1"
const char OPENST_LSM3D_COMPUTEPARTIAL_IMP_NAME[] = M_LSM3D_IMP_NAME;
/* sizeof of the string literal: length includes the terminating NUL. */
const size_t OPENST_LSM3D_COMPUTEPARTIAL_IMP_NAME_LENGTH = sizeof(M_LSM3D_IMP_NAME);
/* Block Lock-Sweeping Method (BLSMv1): runs up to (max_iter - start_iter)
 * sweeps over an NBI x NBJ x NBK grid of blocks, visiting blocks along
 * anti-diagonal wavefronts so that blocks on the same level can be updated
 * concurrently by different OpenMP threads.
 * Returns the total iteration count reached; *converged is set to 1 when
 * the final sweep reported no change in any block. */
int OpenST_LSM3D_ComputePartial(OPENST_FLOAT *U, char *LSM_UNLOCKED, OPENST_FLOAT *V,
size_t NI, size_t NJ, size_t NK,
OPENST_FLOAT HI, OPENST_FLOAT HJ, OPENST_FLOAT HK,
int start_iter, int max_iter, int *converged,
size_t BSIZE_I, size_t BSIZE_J, size_t BSIZE_K,
OPENST_FLOAT EPS){
int total_it, it, notconvergedl, notconvergedt;
int REVI, REVJ, REVK;
size_t NBI, NBJ, NBK;
#if (_OPENMP > 200203)
/* OpenMP >= 3.0 allows unsigned loop variables in "omp for". */
size_t levelr, K1, K2, kr, level, I1, I2, ir, jr;
#else
/* Older OpenMP requires signed loop variables; fall back to ptrdiff_t. */
#pragma message("WARNING: size_t to ptrdiff_t cast enabled")
ptrdiff_t levelr, K1, K2, kr, level, I1, I2, ir, jr;
#endif
if(start_iter >= max_iter){
return max_iter;
}
total_it = start_iter;
notconvergedl = 0;
/* number of blocks per axis, rounded up to cover a partial trailing block */
NBI = NI/BSIZE_I + (NI % BSIZE_I > 0);
NBJ = NJ/BSIZE_J + (NJ % BSIZE_J > 0);
NBK = NK/BSIZE_K + (NK % BSIZE_K > 0);
#pragma omp parallel default(none) \
shared(BSIZE_I, BSIZE_J, BSIZE_K, NBI, NBJ, NBK, \
start_iter, total_it, notconvergedl, NI, NJ, NK, \
U, LSM_UNLOCKED, V, HI, HJ, HK, max_iter, EPS) \
private(it, REVI, REVJ, REVK, notconvergedt, \
levelr, K1, K2, level, I1, I2, ir, jr, kr)
{
for(it = start_iter; it < max_iter; ++it){
/* exactly one thread advances the shared per-sweep counters */
#pragma omp single nowait
{
++total_it;
notconvergedl = 0;
}
notconvergedt = 0;
/* sweep direction alternates each iteration, as in the serial FSM */
OpenST_FSM3D_GetSweepOrder(it, &REVI, &REVJ, &REVK);
for(levelr = 0; levelr < NBI + NBJ + NBK - 2; ++levelr){
K1 = (NBI + NBJ - 2 < levelr) ?
(levelr - NBI - NBJ + 2) : 0;
K2 = (NBK - 1 > levelr) ? levelr : NBK - 1;
for(kr = K1; kr <= K2; ++kr){
level = levelr - kr;
I1 = (NBJ - 1 < level) ? (level - NBJ + 1) : 0;
I2 = (NBI - 1 > level) ? level : NBI - 1;
/* blocks on the same anti-diagonal are independent of each other */
#pragma omp for nowait schedule(dynamic,1)
for(ir = I1; ir <= I2; ++ir){
jr = level - ir;
if(OpenST_LSM3D_BlockSerial(U, LSM_UNLOCKED, V,
NI, NJ, NK,
HI, HJ, HK,
REVI, REVJ, REVK,
ir * BSIZE_I, jr * BSIZE_J,
kr * BSIZE_K,
BSIZE_I, BSIZE_J, BSIZE_K,
EPS)){
notconvergedt = 1;
}
}
}
/* the whole wavefront must finish before the next level starts */
#pragma omp barrier
}
/* merge per-thread "still changing" flags into the shared counter */
#pragma omp atomic
notconvergedl += notconvergedt;
#pragma omp barrier
#pragma omp flush (notconvergedl)
if(!notconvergedl){
break;
}
#pragma omp barrier
}
}
*converged = (notconvergedl == 0);
return total_it;
}
|
prob1.c | #include <omp.h>
#include <stdio.h>
#define N 8
/* Zero a 32-element array in parallel with N threads, then print it serially. */
int main(int argc, char** argv)
{
    int arr[32];

    omp_set_num_threads(N);

    /* Combined construct: identical work-sharing to the separate
     * "parallel" + "for" pair; the loop index is private by rule. */
    #pragma omp parallel for
    for (int idx = 0; idx < 32; idx++)
    {
        arr[idx] = 0;
    }

    /* Serial verification pass. */
    for (int idx = 0; idx < 32; idx++)
    {
        printf("arr[%d] = %d\n", idx, arr[idx]);
    }
    return 0;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 * y is normalized in place (borrow/carry propagation) so that the
 * microsecond difference ends up non-negative — the classic glibc-manual
 * elapsed-time idiom. Returns 1 if the difference is negative, else 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    const int USEC_PER_SEC = 1000000;

    /* Borrow whole seconds into y until x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / USEC_PER_SEC + 1;
        y->tv_usec -= USEC_PER_SEC * borrow;
        y->tv_sec  += borrow;
    }

    /* Carry excess microseconds of the difference back into seconds. */
    if (x->tv_usec - y->tv_usec > USEC_PER_SEC) {
        int carry = (x->tv_usec - y->tv_usec) / USEC_PER_SEC;
        y->tv_usec += USEC_PER_SEC * carry;
        y->tv_sec  -= carry;
    }

    /* tv_usec is certainly positive now. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative iff x's (normalized) seconds fall short of y's. */
    return x->tv_sec < y->tv_sec;
}
/**
 * Driver for the order-1 3D 7-point variable-coefficient stencil benchmark.
 * Usage: ./exe Nx Ny Nz [Nt]  (interior sizes; +2 ghost layers are added).
 * Runs the serial stencil TESTS times and reports the best time.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  // Defaults (32^3 interior + ghosts, 10 steps) guard against reading these
  // uninitialized when too few command-line arguments are supplied.
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the two time planes of the solution array
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // allocate the 7 coefficient arrays
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  // BUG FIX: the init loops previously started at index 1, leaving the
  // index-0 boundary planes (read by the stencil via i-1/j-1/k-1) and all
  // of A[1] uninitialized. Initialize everything, and copy A[0] into A[1]
  // so both time planes start from defined values.
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    // BUG FIX: was `min(...)`, which is never defined — the macro is MIN.
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (including the previously-leaked top-level
  // pointers A, coef and the tile_size list).
  for(i=0; i<Nz; i++){
    for(j=0; j<Ny; j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7; m++){
    for(i=0; i<Nz; i++){
      for(j=0; j<Ny; j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
GB_AxB_dot3_template.c | //------------------------------------------------------------------------------
// GB_AxB_dot3_template: C<M>=A'*B via dot products, where C is sparse/hyper
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C and M are both sparse or hyper, and C->h is a copy of M->h.
// M is present, and not complemented. It may be valued or structural.
{
// One task per thread chunk; zombie counts are summed via the reduction.
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (tid = 0 ; tid < ntasks ; tid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [tid].kfirst ;
int64_t klast = TaskList [tid].klast ;
int64_t pC_first = TaskList [tid].pC ;
int64_t pC_last = TaskList [tid].pC_end ;
int64_t bpleft = 0 ; // Ch is not jumbled
int64_t task_nzombies = 0 ; // # of zombies found by this task
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get C(:,k) and M(:k)
//------------------------------------------------------------------
#if defined ( GB_MASK_SPARSE_AND_STRUCTURAL )
// M and C are sparse
const int64_t j = k ;
#else
// M and C are either both sparse or both hypersparse
const int64_t j = GBH (Ch, k) ;
#endif
int64_t pC_start = Cp [k] ;
int64_t pC_end = Cp [k+1] ;
if (k == kfirst)
{
// First vector for task; may only be partially owned.
pC_start = pC_first ;
pC_end = GB_IMIN (pC_end, pC_last) ;
}
else if (k == klast)
{
// Last vector for task; may only be partially owned.
pC_end = pC_last ;
}
else
{
// task completely owns this vector C(:,k).
}
//------------------------------------------------------------------
// get B(:,j)
//------------------------------------------------------------------
#if GB_B_IS_HYPER
// B is hyper
int64_t pB_start, pB_end ;
GB_lookup (true, Bh, Bp, vlen, &bpleft, bnvec-1, j,
&pB_start, &pB_end) ;
#elif GB_B_IS_SPARSE
// B is sparse
const int64_t pB_start = Bp [j] ;
const int64_t pB_end = Bp [j+1] ;
#else
// B is bitmap or full
const int64_t pB_start = j * vlen ;
#endif
#if (GB_B_IS_SPARSE || GB_B_IS_HYPER)
const int64_t bjnz = pB_end - pB_start ;
if (bjnz == 0)
{
// no work to do if B(:,j) is empty, except for zombies
task_nzombies += (pC_end - pC_start) ;
for (int64_t pC = pC_start ; pC < pC_end ; pC++)
{
// C(i,j) is a zombie
int64_t i = Mi [pC] ;
Ci [pC] = GB_FLIP (i) ;
}
continue ;
}
#if (GB_A_IS_SPARSE || GB_A_IS_HYPER)
// Both A and B are sparse; get first and last in B(:,j)
const int64_t ib_first = Bi [pB_start] ;
const int64_t ib_last = Bi [pB_end-1] ;
#endif
#endif
//------------------------------------------------------------------
// C(:,j)<M(:,j)> = A(:,i)'*B(:,j)
//------------------------------------------------------------------
for (int64_t pC = pC_start ; pC < pC_end ; pC++)
{
//--------------------------------------------------------------
// get C(i,j) and M(i,j)
//--------------------------------------------------------------
bool cij_exists = false ;
GB_CIJ_DECLARE (cij) ;
// get the value of M(i,j)
int64_t i = Mi [pC] ;
#if !defined ( GB_MASK_SPARSE_AND_STRUCTURAL )
// if M is structural, no need to check its values
if (GB_mcast (Mx, pC, msize))
#endif
{
//----------------------------------------------------------
// the mask allows C(i,j) to be computed
//----------------------------------------------------------
#if GB_A_IS_HYPER
// A is hyper
int64_t pA, pA_end ;
int64_t apleft = 0 ; // M might be jumbled
GB_lookup (true, Ah, Ap, vlen, &apleft, anvec-1, i,
&pA, &pA_end) ;
const int64_t ainz = pA_end - pA ;
if (ainz > 0)
#elif GB_A_IS_SPARSE
// A is sparse
int64_t pA = Ap [i] ;
const int64_t pA_end = Ap [i+1] ;
const int64_t ainz = pA_end - pA ;
if (ainz > 0)
#else
// A is bitmap or full
const int64_t pA = i * vlen ;
#endif
{
// C(i,j) = A(:,i)'*B(:,j)
#include "GB_AxB_dot_cij.c"
}
}
if (!GB_CIJ_EXISTS)
{
// C(i,j) is a zombie: mark it with the flipped index so a
// later phase can prune it from the sparse structure.
task_nzombies++ ;
Ci [pC] = GB_FLIP (i) ;
}
}
}
// fold this task's zombie count into the reduction variable
nzombies += task_nzombies ;
}
}
#undef GB_A_IS_SPARSE
#undef GB_A_IS_HYPER
#undef GB_A_IS_BITMAP
#undef GB_A_IS_FULL
#undef GB_B_IS_SPARSE
#undef GB_B_IS_HYPER
#undef GB_B_IS_BITMAP
#undef GB_B_IS_FULL
|
DRB038-truedepseconddimension-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outmost loop can be parallelized in this program.
Data race pair: b[i][j]@65:7 vs. b[i][j-1]@65:15
*/
#include <stdlib.h>
#include <omp.h>
/* DataRaceBench kernel: the second loop nest carries a true dependence in
 * the second dimension (b[i][j] reads b[i][j-1]); per the file header this
 * program is a *positive* (race-containing) benchmark, so the parallel
 * pragmas below are intentionally as-is — do not "fix" them. */
int main(int argc,char *argv[])
{
int i;
int j;
int len = 1000;
if (argc > 1)
len = atoi(argv[1]);
int n = len;
int m = len;
double b[n][m];
/* initialization: iterations are fully independent */
#pragma omp parallel for private (i,j)
for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= m - 1; j += 1) {
b[i][j] = (i + j);
}
}
/* dependence along j within each row */
#pragma omp parallel for private (i,j)
for (i = 0; i <= n - 1; i += 1) {
for (j = 1; j <= m - 1; j += 1) {
b[i][j] = b[i][j - 1];
}
}
/* NOTE(review): printf is used without #include <stdio.h>, and "%d" is
 * given a double argument (b is double) — undefined behavior; presumably
 * inherited from the benchmark generator. Verify against upstream before
 * changing, since output is not what the benchmark measures. */
for (i = 0; i <= n - 1; i += 1) {
for (j = 0; j <= m - 1; j += 1) {
printf("%d\n",b[i][j]);
}
}
return 0;
}
|
3mm.c | /**
* 3mm.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
#define NI SIZE
#define NJ SIZE
#define NK SIZE
#define NL SIZE
#define NM SIZE
#pragma GCC diagnostic ignored "-Wbuiltin-macro-redefined"
#pragma clang diagnostic ignored "-Wmacro-redefined"
#define ERROR_THRESHOLD 1.5
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Populate the flat row-major input matrices with the deterministic
 * PolyBench pattern: A[i][j] = i*j/NI, B[i][j] = i*(j+1)/NJ,
 * C[i][j] = i*(j+3)/NL, D[i][j] = i*(j+2)/NK. */
void init_array(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D) {
  for (int r = 0; r < NI; r++)
    for (int c = 0; c < NK; c++)
      A[r * NK + c] = ((DATA_TYPE)r * c) / NI;

  for (int r = 0; r < NK; r++)
    for (int c = 0; c < NJ; c++)
      B[r * NJ + c] = ((DATA_TYPE)r * (c + 1)) / NJ;

  for (int r = 0; r < NJ; r++)
    for (int c = 0; c < NM; c++)
      C[r * NM + c] = ((DATA_TYPE)r * (c + 3)) / NL;

  for (int r = 0; r < NM; r++)
    for (int c = 0; c < NL; c++)
      D[r * NL + c] = ((DATA_TYPE)r * (c + 2)) / NK;
}
/* Count the entries of G and G_OMP whose percent difference exceeds
 * ERROR_THRESHOLD; returns that count (0 means the results agree). */
int compareResults(DATA_TYPE *G, DATA_TYPE *G_OMP) {
  int mismatches = 0;
  for (int r = 0; r < NI; r++) {
    for (int c = 0; c < NL; c++) {
      if (percentDiff(G[r * NL + c], G_OMP[r * NL + c]) > ERROR_THRESHOLD)
        mismatches++;
    }
  }
  return mismatches;
}
/* Dense matrix product helper: Z (n x p) = X (n x q) * Y (q x p), all
 * row-major flat buffers. Accumulation order (k ascending) matches the
 * original in-place version, so results are bit-identical. */
static void mm3_product(const DATA_TYPE *X, const DATA_TYPE *Y, DATA_TYPE *Z,
                        int n, int q, int p) {
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < p; j++) {
      DATA_TYPE acc = 0;
      for (int k = 0; k < q; ++k) {
        acc += X[i * q + k] * Y[k * p + j];
      }
      Z[i * p + j] = acc;
    }
  }
}

/* Reference (CPU) implementation of the 3-matrix chain:
 * E := A*B, F := C*D, G := E*F. */
void mm3(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D,
         DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) {
  mm3_product(A, B, E, NI, NK, NJ); /* E := A*B */
  mm3_product(C, D, F, NJ, NM, NL); /* F := C*D */
  mm3_product(E, F, G, NI, NJ, NL); /* G := E*F */
}
/* Offloaded version of the 3-matrix chain. One target region maps the four
 * inputs to the device and the three results back; the three products run
 * as separate distribute-parallel-for nests inside it, so E and F computed
 * on the device are reused for G without extra host transfers.
 * NOTE(review): LLVM_MCA_BEGIN/END presumably come from BenchmarksUtil.h
 * for machine-code analysis — confirm they expand to no-ops in normal builds. */
void mm3_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D,
DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) {
/* E := A*B */
#pragma omp target teams \
map(to : A[ : NI *NK], B[ : NK *NJ], C[ : NJ *NM], D[ : NM *NL]) \
map(from : E[ : NI *NJ], F[ : NJ *NL], G[ : NI *NL]) \
device(OMP_DEVICE_ID) \
thread_limit(128)
{
#pragma omp distribute parallel for collapse(2)
for (int i = 0; i < NI; i++) {
for (int j = 0; j < NJ; j++) {
LLVM_MCA_BEGIN("kernel");
E[i * NJ + j] = 0;
for (int k = 0; k < NK; ++k) {
E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
LLVM_MCA_END("kernel");
}
}
/* F := C*D */
#pragma omp distribute parallel for collapse(2)
for (int i = 0; i < NJ; i++) {
for (int j = 0; j < NL; j++) {
F[i * NL + j] = 0;
for (int k = 0; k < NM; ++k) {
F[i * NL + j] += C[i * NM + k] * D[k * NL + j];
}
}
}
/* G := E*F */
#pragma omp distribute parallel for collapse(2)
for (int i = 0; i < NI; i++) {
for (int j = 0; j < NL; j++) {
G[i * NL + j] = 0;
for (int k = 0; k < NJ; ++k) {
G[i * NL + j] += E[i * NJ + k] * F[k * NL + j];
}
}
}
}
}
/* Benchmark driver: allocates inputs, runs the offloaded and/or sequential
 * 3mm kernels depending on build flags, optionally compares them, and
 * returns the mismatch count (0 on success). */
int main(int argc, char **argv) {
fprintf(
stdout,
"<< Linear Algebra: 3 Matrix Multiplications (E=A.B; F=C.D; G=E.F) >>\n");

// declare arrays and allocate memory
DATA_TYPE *A = (DATA_TYPE *)malloc(NI * NK * sizeof(DATA_TYPE));
DATA_TYPE *B = (DATA_TYPE *)malloc(NK * NJ * sizeof(DATA_TYPE));
DATA_TYPE *C = (DATA_TYPE *)malloc(NJ * NM * sizeof(DATA_TYPE));
DATA_TYPE *D = (DATA_TYPE *)malloc(NM * NL * sizeof(DATA_TYPE));
// Result buffers stay NULL unless the matching build flag enables them;
// free(NULL) below is then a harmless no-op.
DATA_TYPE *E = NULL;
DATA_TYPE *F = NULL;
DATA_TYPE *G = NULL;
DATA_TYPE *E_OMP = NULL;
DATA_TYPE *F_OMP = NULL;
DATA_TYPE *G_OMP = NULL;

// initialize arrays
init_array(A, B, C, D);

// run OMP on GPU or CPU if enabled
#if defined(RUN_OMP_GPU) || defined(RUN_OMP_CPU)
E_OMP = (DATA_TYPE *)calloc(NI * NJ, sizeof(DATA_TYPE));
F_OMP = (DATA_TYPE *)calloc(NJ * NL, sizeof(DATA_TYPE));
G_OMP = (DATA_TYPE *)calloc(NI * NL, sizeof(DATA_TYPE));
BENCHMARK_OMP(mm3_OMP(A, B, C, D, E_OMP, F_OMP, G_OMP));
// prevent dead code elimination
DCE_PREVENT(G_OMP, NI*NL);
#endif

// run sequential version if enabled
#ifdef RUN_CPU_SEQ
E = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE));
F = (DATA_TYPE *)malloc(NJ * NL * sizeof(DATA_TYPE));
G = (DATA_TYPE *)malloc(NI * NL * sizeof(DATA_TYPE));
BENCHMARK_CPU(mm3(A, B, C, D, E, F, G));
// prevent dead code elimination
DCE_PREVENT(G, NI*NL);
#endif

int fail = 0;
// if test mode enabled, compare the results
// NOTE(review): RUN_TEST assumes both kernels above ran; if either flag is
// missing, compareResults dereferences a NULL G/G_OMP — build flags are
// presumably kept consistent by the build system. Verify.
#ifdef RUN_TEST
fail = compareResults(G, G_OMP);
printf("Errors on OMP (threshold %4.2lf): %d\n", ERROR_THRESHOLD, fail);
#endif

// Release memory
free(A);
free(B);
free(C);
free(D);
free(E);
free(E_OMP);
free(F);
free(F_OMP);
free(G);
free(G_OMP);

return fail;
}
|
cancel-parallel-1.c | /* { dg-do run } */
/* { dg-set-target-env-var OMP_CANCELLATION "true" } */
#include <stdlib.h>
#include <omp.h>
/* GCC libgomp testsuite case for "#pragma omp cancel parallel":
 * - with cancellation enabled (OMP_CANCELLATION=true, set by the test
 *   harness), every thread branches to the end of the parallel region at
 *   the cancel point, so abort() is never reached;
 * - with cancellation disabled, cancel is a no-op, but then
 *   omp_get_cancellation() returns false and abort() is skipped as well. */
int
main ()
{
#pragma omp parallel num_threads (32)
{
#pragma omp cancel parallel
/* Executed only when cancellation is disabled. */
if (omp_get_cancellation ())
abort ();
}
return 0;
}
|
residualbased_elimination_builder_and_solver_componentwise.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE )
#define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE
/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "includes/global_pointer_variables.h"
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** Short class definition.
Detail class definition.
This is a specialization of the standard buliding strategy to the case in which a single variable is to be used in the
building.
the creation of the DofList and the construction of the system matrix is in this case much faster
as the neighborhood relationships are considered to be known
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
template<class TSparseSpace,
class TDenseSpace ,
class TLinearSolver,
class TVariableType
>
class ResidualBasedEliminationBuilderAndSolverComponentwise
: public ResidualBasedEliminationBuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >
{
public:
/**@name Type Definitions */
/*@{ */
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedEliminationBuilderAndSolverComponentwise );
typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType;
typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> ResidualBasedEliminationBuilderAndSolverType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Constructor from a Parameters block: validates the settings and
* resolves the scalar variable/component this builder assembles for.
* @param pNewLinearSystemSolver Linear solver forwarded to the base
* builder-and-solver.
* @param ThisParameters Settings; must provide "components_wise_variable",
* the registered name of a scalar variable or component.
*/
explicit ResidualBasedEliminationBuilderAndSolverComponentwise(
typename TLinearSolver::Pointer pNewLinearSystemSolver,
Parameters ThisParameters
) : ResidualBasedEliminationBuilderAndSolverType(pNewLinearSystemSolver)
{
// Validate default parameters
Parameters default_parameters = Parameters(R"(
{
"name" : "ResidualBasedEliminationBuilderAndSolverComponentwise",
"components_wise_variable" : "SCALAR_VARIABLE_OR_COMPONENT"
})" );
ThisParameters.ValidateAndAssignDefaults(default_parameters);
// Resolve the variable name through the global Kratos components registry.
rVar = KratosComponents<TVariableType>::Get(ThisParameters["components_wise_variable"].GetString());
}
/**
* @brief Constructor taking the component-wise variable directly.
* @param pNewLinearSystemSolver Linear solver forwarded to the base
* builder-and-solver.
* @param Var Scalar variable/component whose DOFs form the system.
*/
explicit ResidualBasedEliminationBuilderAndSolverComponentwise(
typename TLinearSolver::Pointer pNewLinearSystemSolver,TVariableType const& Var)
: ResidualBasedEliminationBuilderAndSolverType(pNewLinearSystemSolver)
, rVar(Var)
{
/* std::cout << "using the standard builder and solver " << std::endl; */
}
/** Destructor. No resources owned beyond what the base class releases.
*/
~ResidualBasedEliminationBuilderAndSolverComponentwise() override {}
/*@} */
/**@name Operators
*/
/*@{ */
//**************************************************************************
//**************************************************************************
/**
* @brief Assembles the system matrix A and RHS vector b from all elements
* and conditions of the model part, parallelized with one static chunk of
* entities per OpenMP thread.
* @param pScheme Integration scheme used to compute local contributions.
* @param r_model_part Model part providing elements, conditions and nodes.
* @param A Global system (LHS) matrix, assembled in place.
* @param b Global RHS vector, assembled in place.
* NOTE(review): lock_array exists only under _OPENMP, but the
* USE_LOCKS_IN_ASSEMBLY branches reference it unconditionally — that flag
* presumably implies an OpenMP build; confirm before enabling it elsewhere.
*/
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& b) override
{
KRATOS_TRY
if(!pScheme)
KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", "");
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) );
//create a partition of the element array
int number_of_threads = OpenMPUtils::GetNumThreads();
#ifdef _OPENMP
int A_size = A.size1();
//creating an array of lock variables of the size of the system matrix
std::vector< omp_lock_t > lock_array(A.size1());
for(int i = 0; i<A_size; i++)
omp_init_lock(&lock_array[i]);
#endif
DenseVector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
if (this->GetEchoLevel()>0)
{
KRATOS_WATCH( number_of_threads );
KRATOS_WATCH( element_partition );
}
double start_prod = OpenMPUtils::GetCurrentTime();
// Element assembly: one partition range per thread.
#pragma omp parallel for firstprivate(number_of_threads) schedule(static,1)
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
const ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k];
typename ElementsArrayType::ptr_iterator it_end=pElements.ptr_begin()+element_partition[k+1];
// DOF position of the component-wise variable; assumed identical on
// every node, so it is looked up once from the first node.
unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(rVar);
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
(*it)->InitializeNonLinearIteration(CurrentProcessInfo);
(*it)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,CurrentProcessInfo);
Geometry< Node<3> >& geom = (*it)->GetGeometry();
if(EquationId.size() != geom.size()) EquationId.resize(geom.size(),false);
for(unsigned int i=0; i<geom.size(); i++)
EquationId[i] = geom[i].GetDof(rVar,pos).EquationId();
//assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId,lock_array);
#else
this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId);
#endif
}
}
DenseVector<unsigned int> condition_partition;
CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);
// Condition assembly: same partitioning strategy as for elements.
#pragma omp parallel for firstprivate(number_of_threads) schedule(static,1)
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Condition::EquationIdVectorType EquationId;
const ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ConditionsArrayType::ptr_iterator it_begin=ConditionsArray.ptr_begin()+condition_partition[k];
typename ConditionsArrayType::ptr_iterator it_end=ConditionsArray.ptr_begin()+condition_partition[k+1];
unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(rVar);
// A all elements
for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
(*it)->InitializeNonLinearIteration(CurrentProcessInfo);
(*it)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,CurrentProcessInfo);
Geometry< Node<3> >& geom = (*it)->GetGeometry();
if(EquationId.size() != geom.size()) EquationId.resize(geom.size(),false);
for(unsigned int i=0; i<geom.size(); i++)
{
EquationId[i] = geom[i].GetDof(rVar,pos).EquationId();
}
#ifdef USE_LOCKS_IN_ASSEMBLY
this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId,lock_array);
#else
this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId);
#endif
}
}
if (this->GetEchoLevel()>0)
{
double stop_prod = OpenMPUtils::GetCurrentTime();
std::cout << "parallel building time: " << stop_prod - start_prod << std::endl;
}
#ifdef _OPENMP
for(int i = 0; i<A_size; i++)
omp_destroy_lock(&lock_array[i]);
#endif
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
* @brief Builds the DOF set for the component-wise variable: collects all
* "active" nodes (those with neighbours) and stores one DOF per node.
* @param pScheme Unused here; kept for interface compatibility.
* @param r_model_part Model part whose nodes provide the DOFs.
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part
) override
{
KRATOS_TRY
//fills a list of "active" nodes defined as nodes which have neighbours
// AND no fixed pressure
mActiveNodes.clear();
mActiveNodes.reserve(r_model_part.Nodes().size() );
for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it)
{
if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 )
{
mActiveNodes.push_back(*(it.base() ));
}
}
//fills the DofList and gives a unique progressive tag to each node
BaseType::mDofSet.clear();
BaseType::mDofSet.reserve(mActiveNodes.size() );
for(GlobalPointersVector< Node<3> >::iterator iii = mActiveNodes.begin(); iii!=mActiveNodes.end(); iii++)
{
BaseType::mDofSet.push_back( iii->pGetDof(rVar) );
}
//throws an exception if there are no Degrees of freedom involved in the analysis
if (BaseType::mDofSet.size()==0)
KRATOS_THROW_ERROR(std::logic_error, "No degrees of freedom!", "");
BaseType::mDofSetIsInitialized = true;
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is to be done only in debug mode
#ifdef KRATOS_DEBUG
if(BaseType::GetCalculateReactionsFlag())
{
for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
<< "Node : "<<dof_iterator->Id()<< std::endl
<< "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
}
}
#endif
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
* @brief Lazily allocates and sizes the system matrix, solution increment,
* RHS and (optionally) reactions vector; rebuilds the sparsity graph when
* the matrix is fresh or reshaping is requested.
* @param pScheme Unused here; kept for interface compatibility.
* @param pA / pDx / pb System matrix, solution increment and RHS pointers,
* created in place if still null.
* @param rModelPart Unused here; kept for interface compatibility.
*/
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
) override
{
KRATOS_TRY
if(pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0,0) );
pA.swap(pNewA);
}
if(pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0) );
pDx.swap(pNewDx);
}
if(pb == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0) );
pb.swap(pNewb);
}
if(BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0) );
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
TSystemMatrixType& A = *pA;
TSystemVectorType& Dx = *pDx;
TSystemVectorType& b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,false);
// Graph construction fills the sparsity pattern; the OpenMP variant is
// used whenever the build supports it.
#ifdef _OPENMP
ParallelConstructGraph(A);
#else
ConstructGraph(A);
#endif
}
else
{
if(A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
//KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
// NOTE(review): the resize/ConstructGraph below is unreachable — the
// KRATOS_ERROR above throws first. Presumably dead code kept from an
// older tolerant behavior; confirm before removing.
KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,true);
#ifdef _OPENMP
ParallelConstructGraph(A);
#else
ConstructGraph(A);
#endif
}
}
if (Dx.size() != BaseType::mEquationSystemSize) {
Dx.resize(BaseType::mEquationSystemSize, false);
}
TSparseSpace::SetToZero(Dx);
if (b.size() != BaseType::mEquationSystemSize) {
b.resize(BaseType::mEquationSystemSize, false);
}
TSparseSpace::SetToZero(b);
//if needed resize the vector for the calculation of reactions
if(BaseType::mCalculateReactionsFlag == true)
{
unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
if(BaseType::mpReactionsVector->size() != ReactionsVectorSize)
BaseType::mpReactionsVector->resize(ReactionsVectorSize,false);
}
//swapping pointers
// pA.swap(pNewA);
// pDx.swap(pNewDx);
// pb.swap(pNewb);
#ifndef __SUNPRO_CC
KRATOS_CATCH("")
#endif
}
//**************************************************************************
//**************************************************************************
/// Releases the stored dof set and the reactions vector (if any) so the
/// builder-and-solver can be reused from a clean state.
void Clear() override
{
    // Drop every stored degree of freedom.
    this->mDofSet = DofsArrayType();

    // Release the reactions vector when one was ever allocated.
    if (this->mpReactionsVector != NULL)
        TSparseSpace::Clear((this->mpReactionsVector));

    if (this->GetEchoLevel() > 1)
        KRATOS_WATCH("ResidualBasedEliminationBuilderAndSolver Clear Function called");
}
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Turn back information as a string.
std::string Info() const override
{
    const std::string class_name = "ResidualBasedEliminationBuilderAndSolverComponentwise";
    return class_name;
}
/// Print information about this object.
/// Print information about this object (its identifying string).
void PrintInfo(std::ostream& rOStream) const override
{
    const std::string tag = Info();
    rOStream << tag;
}
/// Print object's data.
/// Print object's data (same identifying string as PrintInfo).
void PrintData(std::ostream& rOStream) const override
{
    const std::string tag = Info();
    rOStream << tag;
}
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
/*@} */
/**@name Protected Operators*/
/*@{ */
//**************************************************************************
//**************************************************************************
//**************************************************************************
//**************************************************************************
void ConstructGraph(TSystemMatrixType& A)
{
// Builds the sparsity pattern of A (one row per free dof of rVar): each row
// gets the dof's own equation id plus the equation ids of the free dofs of
// its NEIGHBOUR_NODES. All entries are inserted with value 0.
KRATOS_TRY
std::vector< std::vector<int> > index_list(BaseType::mEquationSystemSize);
int total_size = 0;
// Dof position of rVar is looked up once on the first active node; assumes
// every active node stores it at the same position — TODO confirm.
unsigned int pos = (mActiveNodes.begin())->GetDofPosition(rVar);
//constructing the system matrix row by row
int index_i;
for(GlobalPointersVector< Node<3> >::iterator in = mActiveNodes.begin();
in!=mActiveNodes.end(); in++)
{
const Node<3>::DofType& current_dof = in->GetDof(rVar,pos);
// fixed dofs are eliminated from the system and get no row
if( current_dof.IsFixed() == false)
{
index_i = (current_dof).EquationId();
GlobalPointersVector< Node<3> >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES);
std::vector<int>& indices = index_list[index_i];
indices.reserve(neighb_nodes.size()+1);
//filling the first neighbours list: the diagonal entry first
indices.push_back(index_i);
for( GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin();
i != neighb_nodes.end(); i++)
{
const Node<3>::DofType& neighb_dof = i->GetDof(rVar,pos);
if(neighb_dof.IsFixed() == false )
{
int index_j = (neighb_dof).EquationId();
indices.push_back(index_j);
}
}
//sorting the indices and elminating the duplicates
std::sort(indices.begin(),indices.end());
typename std::vector<int>::iterator new_end = std::unique(indices.begin(),indices.end());
indices.erase(new_end,indices.end());
// running total of non-zeros, used for the reserve() below
total_size += indices.size();
}
}
A.reserve(total_size,false);
//setting to zero the matrix (and the diagonal matrix)
for(unsigned int i=0; i<BaseType::mEquationSystemSize; i++)
{
std::vector<int>& indices = index_list[i];
for(unsigned int j=0; j<indices.size(); j++)
{
// push_back inserts in (row, sorted column) order, which the loops provide
A.push_back(i,indices[j] , 0.00);
}
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
//**************************************************************************
//**************************************************************************
#ifdef _OPENMP
/**
 * OpenMP variant of ConstructGraph: the active nodes are split into one
 * contiguous chunk per thread and each thread fills the rows belonging to its
 * own nodes, so no locking is required (each free dof has exactly one row).
 *
 * Fix: `total_size` was initialized with the double literal 0.0 although it
 * is an int; now initialized with 0.
 */
void ParallelConstructGraph(TSystemMatrixType& A)
{
#ifndef __SUNPRO_CC
    KRATOS_TRY
#endif
    std::vector< std::vector<int> > index_list(BaseType::mEquationSystemSize);
    int number_of_threads = omp_get_max_threads();
    // Dof position of rVar queried once on the first active node.
    unsigned int pos = (mActiveNodes.begin())->GetDofPosition(rVar);

    DenseVector<unsigned int> partition;
    DenseVector<unsigned int> local_sizes(number_of_threads);
    for (int i = 0; i < number_of_threads; i++)
        local_sizes[i] = 0;
    CreatePartition(number_of_threads, mActiveNodes.size(), partition);

    #pragma omp parallel for firstprivate(number_of_threads,pos) schedule(static,1)
    for (int k = 0; k < number_of_threads; k++)
    {
        GlobalPointersVector< Node<3> >::iterator it_begin = mActiveNodes.begin() + partition[k];
        GlobalPointersVector< Node<3> >::iterator it_end = mActiveNodes.begin() + partition[k + 1];
        for (GlobalPointersVector< Node<3> >::iterator in = it_begin; in != it_end; in++)
        {
            const Node<3>::DofType& current_dof = in->GetDof(rVar, pos);
            // fixed dofs are eliminated from the system and get no row
            if (current_dof.IsFixed() == false)
            {
                int index_i = (current_dof).EquationId();
                GlobalPointersVector< Node<3> >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES);
                std::vector<int>& indices = index_list[index_i];
                indices.reserve(neighb_nodes.size() + 1);
                // diagonal entry first, then the free neighbour dofs
                indices.push_back(index_i);
                for (GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin();
                     i != neighb_nodes.end(); i++)
                {
                    const Node<3>::DofType& neighb_dof = i->GetDof(rVar, pos);
                    if (neighb_dof.IsFixed() == false)
                    {
                        int index_j = (neighb_dof).EquationId();
                        indices.push_back(index_j);
                    }
                }
                // sort the indices and eliminate duplicates
                std::sort(indices.begin(), indices.end());
                std::vector<int>::iterator new_end = std::unique(indices.begin(), indices.end());
                indices.erase(new_end, indices.end());
                local_sizes[k] += indices.size();
            }
        }
    }

    // Total number of non-zeros of the system (was "int total_size = 0.0;").
    int total_size = 0;
    for (int i = 0; i < number_of_threads; i++)
        total_size += local_sizes[i];
    A.reserve(total_size, false);

    // Insert the (zero-valued) entries row by row, columns already sorted.
    for (unsigned int i = 0; i < BaseType::mEquationSystemSize; i++)
    {
        std::vector<int>& indices = index_list[i];
        for (unsigned int j = 0; j < indices.size(); j++)
        {
            A.push_back(i, indices[j], 0.00);
        }
    }
#ifndef __SUNPRO_CC
    KRATOS_CATCH("")
#endif
}
#endif
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
TVariableType const & rVar;
GlobalPointersVector<Node<3> > mActiveNodes;
/*@} */
/**@name Private Operators*/
/*@{ */
//******************************************************************************************
//******************************************************************************************
/// Splits [0, number_of_rows) into number_of_threads near-equal chunks:
/// partitions[i] is the first row of chunk i and partitions[number_of_threads]
/// is number_of_rows (the last chunk absorbs the remainder).
inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, DenseVector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    const int chunk = number_of_rows / number_of_threads;
    partitions[0] = 0;
    for (unsigned int i = 1; i < number_of_threads; i++)
        partitions[i] = partitions[i - 1] + chunk;
    partitions[number_of_threads] = number_of_rows;
}
/*@} */
/**@name Private Operations*/
/*@{ */
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class ResidualBasedEliminationBuilderAndSolverComponentwise */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE defined */
|
utils.c | /* File: utils.c */
/*
This file is a part of the Corrfunc package
Copyright (C) 2015-- Manodeep Sinha (manodeep@gmail.com)
License: MIT LICENSE. See LICENSE file under the top-level
directory at https://github.com/manodeep/Corrfunc/
*/
/*
A collection of C wrappers I use. Should be
very obvious. The ones that are not obvious
have comments before the function itself.
Bugs:
Please email me manodeep at gmail dot com
Ver 1.0: Manodeep Sinha, 2nd April, 2012
Ver 1.1: Manodeep Sinha, 14th June, 2012 - replaced
check_string_copy with a "real" wrapper to
snprintf.
Ver 1.2: Manodeep Sinha, Jan 8, 2012 - replaced
print_time with timeval and gettimeofday
*/
#include<inttypes.h>//defines PRId64 for printing int64_t + includes stdint.h
#include<math.h>
#include<string.h>
#include<limits.h>
#include<stdarg.h>
#include<ctype.h>
#include "macros.h"
#include "utils.h"
//#include "defs.h"
#ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time
#include <mach/mach_time.h> /* mach_absolute_time -> really fast */
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
/* Scans cz1[0..ND1) and raises *czmax to the largest value found; *czmax is
   both the initial candidate and the output, so it never decreases. */
void get_max_float(const int64_t ND1, const float *cz1, float *czmax)
{
    float current_max = *czmax;
    int64_t i = 0;
    while (i < ND1) {
        if (cz1[i] > current_max) {
            current_max = cz1[i];
        }
        i++;
    }
    *czmax = current_max;
}
/* Double-precision twin of get_max_float: raises *czmax to the maximum of
   cz1[0..ND1) without ever lowering it. */
void get_max_double(const int64_t ND1, const double *cz1, double *czmax)
{
    double current_max = *czmax;
    int64_t i = 0;
    while (i < ND1) {
        if (cz1[i] > current_max) {
            current_max = cz1[i];
        }
        i++;
    }
    *czmax = current_max;
}
/* Runs a shell command via system(3) and reports the outcome.
   Fix: the original returned EXIT_FAILURE unconditionally, even when the
   command succeeded — callers checking the return value always saw failure.
   Now returns EXIT_SUCCESS when the command exits with status 0 and
   EXIT_FAILURE otherwise (after printing a diagnostic to stderr). */
int run_system_call(const char *execstring)
{
    const int status = system(execstring);
    if (status != EXIT_SUCCESS) {
        fprintf(stderr,"ERROR: executing system command: \n`%s'\n\n",execstring);
        perror(NULL);
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
/* fopen wrapper that prints a diagnostic on failure.
   Returns the FILE pointer, which may be NULL — the caller must check. */
FILE * my_fopen(const char *fname,const char *mode)
{
    FILE *fp = fopen(fname, mode);
    if (fp != NULL) {
        return fp;
    }
    fprintf(stderr,"Could not open file `%s'\n",fname);
    perror(NULL);
    return fp; /* NULL */
}
/*
The following function opens a file (if it already exists)
in append mode. If the file doesn't exist, then the function
creates one, calls the *header() function [which presumably
prints a header to the file] and then returns the file pointer.
As usual, you need to be careful with the file you are appending
to -> otherwise you might end up with a ginormous file. Usually,
I do a system("rm -f filename") before the loop where the file
might be created/modified and remove the file from previous
runs.
*/
/* Opens fname for appending if it already exists; otherwise creates it and
   invokes the caller-supplied header() callback once on the fresh file.
   Returns NULL (via my_fopen) when the file cannot be created/opened. */
FILE * my_fopen_carefully(const char *fname,void (*header)(FILE *))
{
    /* Existence probe with plain fopen: no error chatter for a missing file. */
    FILE *fp = fopen(fname,"r");
    if (fp != NULL) {
        /* file exists -> reopen in append mode */
        fclose(fp);
        return my_fopen(fname,"a+");
    }
    /* file does not exist -> create it and write the header */
    fp = my_fopen(fname,"w");
    if (fp != NULL) {
        (*header)(fp);
    }
    return fp;
}
/* fwrite wrapper: writes nmemb items of `size` bytes and reports short writes.
   Fix: the failure message was copy-pasted from my_fread and talked about
   "reading"/nread; it now correctly reports the write counts.
   Returns the number of items written, or (size_t)-1 (== SIZE_MAX, since
   size_t is unsigned) on a short write. */
size_t my_fwrite(void *ptr, size_t size, size_t nmemb, FILE *stream)
{
    const size_t nwritten = fwrite(ptr, size, nmemb, stream);
    if (nwritten != nmemb) {
        fprintf(stderr,"I/O error (fwrite) has occured.\n");
        fprintf(stderr,"Instead of writing nmemb=%zu, I wrote nwritten = %zu \n",nmemb,nwritten);
        perror(NULL);
        return -1; /* wraps to SIZE_MAX */
    }
    return nwritten;
}
/* fread wrapper: reads nmemb items of `size` bytes and reports short reads.
   Returns the number of items read, or (size_t)-1 (== SIZE_MAX, since size_t
   is unsigned) on a short read. */
size_t my_fread(void *ptr, size_t size, size_t nmemb, FILE *stream)
{
    const size_t nread = fread(ptr, size, nmemb, stream);
    if (nread == nmemb) {
        return nread;
    }
    fprintf(stderr,"I/O error (fread) has occured.\n");
    fprintf(stderr,"Instead of reading nmemb=%zu, I got nread = %zu\n",nmemb,nread);
    perror(NULL);
    return -1;
}
/* fseek wrapper that prints a diagnostic on failure.
   Returns fseek's result: 0 on success, non-zero on error. */
int my_fseek(FILE *stream, long offset, int whence)
{
    const int err = fseek(stream, offset, whence);
    if (err == 0) {
        return err;
    }
    fprintf(stderr,"ERROR: Could not seek `%ld' bytes into the file..exiting\n",offset);
    perror(NULL);
    return err;
}
// A real wrapper to snprintf that will exit() if the allocated buffer length
// was not sufficient. Usage is the same as snprintf
/* snprintf wrapper that reports truncation instead of returning a silently
   truncated string. Usage is the same as snprintf.
   Fix: vsnprintf returns the length the fully formatted string WOULD have
   (excluding the terminating NUL), so truncation occurs whenever the return
   value is >= len — the original only checked `> len` and therefore missed
   the nwritten == len case.
   Returns the number of characters written, or -1 on truncation/error. */
int my_snprintf(char *buffer,int len,const char *format, ...)
{
    va_list args;
    va_start(args,format);
    const int nwritten = vsnprintf(buffer, (size_t) len, format, args);
    va_end(args);
    if (nwritten >= len || nwritten < 0) {
        fprintf(stderr,"ERROR: printing to string failed (wrote %d characters while only %d characters were allocated)\n",nwritten,len);
        fprintf(stderr,"Increase `len'=%d in the header file\n",len);
        return -1;
    }
    return nwritten;
}
/* Returns 1 on a big-endian host and 0 on a little-endian one, by checking
   which byte of the word 0x01000000 lands first in memory. */
int is_big_endian(void)
{
    union {
        uint32_t word;
        char bytes[4];
    } probe = { 0x01000000 };
    return probe.bytes[0];
}
/* Writes the bytes of `in` into `out` in reversed order (endianness swap).
   `in` and `out` must not overlap. Emits a warning for sizes above 16 bytes
   but still performs the swap. */
void byte_swap(char * const in, const size_t size, char *out)
{
    if (size > 16) {
        fprintf(stderr,"WARNING: In %s> About to byte_swap %zu bytes but no intrinsic C data-type exists with size larger than 16 bytes",
                __FUNCTION__, size);
    }
    /* out[i] takes the i-th byte counted from the END of the input */
    for (size_t i = 0; i < size; i++) {
        out[i] = in[size - 1 - i];
    }
}
//Taken from the inter-webs: http://stackoverflow.com/questions/1024389/print-an-int-in-binary-representation-using-c
char * int2bin(int a, char *buffer, int buf_size)
{
/* Renders the 32 bits of `a` as '0'/'1' characters, filling BACKWARDS from
   buffer[buf_size-1] (LSB ends up at buffer[buf_size-1]).
   NOTE(review): always writes exactly 32 characters regardless of buf_size,
   so buf_size must be >= 33 or the writes run off the front of the buffer —
   TODO confirm call sites. No NUL terminator is written; the caller must
   provide one. The returned pointer is buffer + buf_size - 33, i.e. one
   before the first character written. */
buffer += (buf_size - 1);
for (int i = 31; i >= 0; i--) {
*buffer-- = (a & 1) + '0';
a >>= 1;
}
return buffer;
}
/*
Can not remember where I (MS) got this from. Fairly sure
stackoverflow was involved.
Finally taken from http://stackoverflow.com/a/6719178/2237582 */
void current_utc_time(struct timespec *ts)
{
// Fills ts with a timestamp suitable for measuring elapsed time.
// On OS X (which lacked clock_gettime when this was written) the fast
// mach_absolute_time counter is used instead of the calendar clock.
#ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time
// timebase (numer/denom converts ticks -> nanoseconds) is queried once
// and cached across calls
static mach_timebase_info_data_t sTimebaseInfo = {.numer=0, .denom=0};
uint64_t start = mach_absolute_time();
if ( sTimebaseInfo.denom == 0 ) {
mach_timebase_info(&sTimebaseInfo);
}
// NOTE(review): on this branch tv_sec stays 0 and the whole tick count (in
// ns) is packed into tv_nsec — callers should only use differences of two
// such timestamps, not absolute values. Verify against call sites.
ts->tv_sec = 0;//(start * sTimebaseInfo.numer/sTimebaseInfo.denom) * tv_nsec;
ts->tv_nsec = start * sTimebaseInfo.numer / sTimebaseInfo.denom;
#if 0
//Much slower implementation for clock
//Slows down the code by up to 4x
clock_serv_t cclock;
mach_timespec_t mts;
host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
clock_get_time(cclock, &mts);
mach_port_deallocate(mach_task_self(), cclock);
ts->tv_sec = mts.tv_sec;
ts->tv_nsec = mts.tv_nsec;
#endif
#else
clock_gettime(CLOCK_REALTIME, ts);
#endif
}
/*
I like this particular function. Generic replacement for printing
(in meaningful units) the actual execution time of a code/code segment.
The function call should be like this:
---------------------------
struct timeval t_start,t_end;
gettimeofday(&t_start,NULL);
do_something();
gettimeofday(&t_end,NULL);
print_time(t_start,t_end,"do something");
---------------------------
if the code took 220 mins 30.1 secs
-> print_time will output `Time taken to execute `do something' = 3 hours 40 mins 30.1 seconds
(code can be easily extended to include `weeks' as a system of time unit. left to the reader)
*/
char * get_time_string(struct timeval t0,struct timeval t1)
{
// Formats the interval t1 - t0 into a newly allocated human-readable string:
// fractional seconds when under a minute, otherwise "D days H hrs M mins
// S secs" (units with a count of 0 or 1 are skipped, mirroring print_time).
// The caller owns, and must free, the returned buffer.
const size_t MAXLINESIZE = 1024;
// NOTE(review): my_malloc can return NULL; the my_snprintf below would then
// write through NULL — presumably accepted as fatal here, verify.
char *time_string = my_malloc(sizeof(char), MAXLINESIZE);
double timediff = t1.tv_sec - t0.tv_sec;
// unit sizes in seconds: days, hours, minutes, seconds
double ratios[] = {24*3600.0, 3600.0, 60.0, 1};
if(timediff < ratios[2]) {
// under a minute: seconds with microsecond resolution
my_snprintf(time_string, MAXLINESIZE,"%6.3lf secs",1e-6*(t1.tv_usec-t0.tv_usec) + timediff);
} else {
double timeleft = timediff;
size_t curr_index = 0; // write position inside time_string
int which = 0;         // index into ratios/units
while (which < 4) {
double time_to_print = floor(timeleft/ratios[which]);
if (time_to_print > 1) {
timeleft -= (time_to_print*ratios[which]);
char units[4][10] = {"days", "hrs" , "mins", "secs"};
char tmp[MAXLINESIZE];
my_snprintf(tmp, MAXLINESIZE, "%5d %s",(int)time_to_print,units[which]);
const size_t len = strlen(tmp);
const size_t required_len = curr_index + len + 1;
// XRETURN appears to bail out of this function returning NULL when the
// condition fails — confirm against its definition in macros.h.
XRETURN(MAXLINESIZE >= required_len, NULL,
"buffer overflow will occur: string has space for %zu bytes while concatenating requires at least %zu bytes\n",
MAXLINESIZE, required_len);
strcpy(time_string + curr_index, tmp);
curr_index += len;
}
which++;
}
}
return time_string;
}
/* Prints "Time taken to execute '<s>' = <interval>" to stderr, where the
   interval t1 - t0 is shown as fractional seconds when under a minute and as
   days/hrs/mins/secs otherwise (units with count 0 or 1 are skipped). */
void print_time(struct timeval t0,struct timeval t1,const char *s)
{
    const double ratios[] = {24*3600.0, 3600.0, 60.0, 1};
    double timediff = t1.tv_sec - t0.tv_sec;
    fprintf(stderr,"Time taken to execute '%s' = ",s);
    if (timediff < ratios[2]) {
        /* under a minute: seconds with microsecond resolution */
        fprintf(stderr,"%6.3lf secs",1e-6*(t1.tv_usec-t0.tv_usec) + timediff);
    } else {
        char units[4][10] = {"days", "hrs" , "mins", "secs"};
        double timeleft = timediff;
        for (int which = 0; which < 4; which++) {
            double time_to_print = floor(timeleft/ratios[which]);
            if (time_to_print > 1) {
                timeleft -= (time_to_print*ratios[which]);
                fprintf(stderr,"%5d %s",(int)time_to_print,units[which]);
            }
        }
    }
    fprintf(stderr,"\n");
}
//wrapper for realloc. varname should contain the name of the
//variable being re-allocated -> helps debugging in case of a crash.
/* realloc wrapper for N elements of `size` bytes. `varname` names the
   variable being resized, to help debugging on failure. Returns the new
   pointer, or NULL (with a diagnostic) when realloc fails. */
void* my_realloc(void *x,size_t size,int64_t N,const char *varname)
{
    void *resized = realloc(x, N * size);
    if (resized == NULL) {
        fprintf(stderr,"ERROR: Could not reallocate for %"PRId64" elements with %zu size for variable `%s' ..aborting\n",N,size,varname);
        perror(NULL);
    }
    return resized;
}
/* malloc wrapper for N elements of `size` bytes; prints a diagnostic on
   failure. Returns the (possibly NULL) pointer. */
void* my_malloc(size_t size,int64_t N)
{
    void *buf = malloc(N*size);
    if (buf == NULL) {
        fprintf(stderr,"malloc for %"PRId64" elements with %zu bytes failed...\n",N,size);
        perror(NULL);
    }
    return buf;
}
/* calloc wrapper for N zero-initialised elements of `size` bytes; prints a
   diagnostic on failure and returns the (possibly NULL) pointer.
   Fix: the failure message claimed "malloc" although this is the calloc
   wrapper; it now names calloc. */
void* my_calloc(size_t size, int64_t N)
{
    void *x = calloc((size_t) N, size);
    if (x == NULL) {
        fprintf(stderr,"calloc for %"PRId64" elements with %zu size failed...\n",N,size);
        perror(NULL);
    }
    return x;
}
//real free. Use only if you are going to check the
//pointer variable afterwards for NULL.
/* Frees *x and sets it to NULL. Takes a pointer-to-pointer so the CALLER's
   variable is nulled out, not a local copy — safe to call again afterwards
   and safe against double-free via this variable. */
void my_free(void ** x)
{
    if (*x != NULL) {
        free(*x);
    }
    *x = NULL;
}
/* Allocates an nrow x ncol matrix of `size`-byte cells as one row-pointer
   array plus one buffer per row. Returns NULL (with everything already
   allocated rolled back) on any failure.
   Fix: loop counters are now int64_t — the original used int, which
   truncates/overflows for nrow > INT_MAX even though the extents are
   declared int64_t. */
void **matrix_malloc(size_t size,int64_t nrow,int64_t ncol)
{
    void **m = (void **) my_malloc(sizeof(void *), nrow);
    if (m == NULL) {
        return NULL;
    }
    for (int64_t i = 0; i < nrow; i++) {
        m[i] = my_malloc(size, ncol);
        if (m[i] == NULL) {
            /* roll back the rows allocated so far */
            for (int64_t j = i - 1; j >= 0; j--) {
                free(m[j]);
            }
            free(m);
            return NULL;
        }
    }
    return m;
}
/* Zero-initialised twin of matrix_malloc: nrow x ncol matrix of `size`-byte
   cells, each row allocated with my_calloc. Returns NULL (with partial
   allocations rolled back) on any failure.
   Fix: loop counters are now int64_t to match the declared extents (the
   original int counters truncate for nrow > INT_MAX). */
void **matrix_calloc(size_t size,int64_t nrow,int64_t ncol)
{
    void **m = (void **) my_calloc(sizeof(void *), nrow);
    if (m == NULL) {
        return NULL;
    }
    for (int64_t i = 0; i < nrow; i++) {
        m[i] = my_calloc(size, ncol);
        if (m[i] == NULL) {
            /* roll back the rows allocated so far */
            for (int64_t j = i - 1; j >= 0; j--) {
                free(m[j]);
            }
            free(m);
            return NULL;
        }
    }
    return m;
}
// Resize a matrix. Returns EXIT_SUCCESS or EXIT_FAILURE.
// Presently only resizing the last dimension is supported, due to
// potential memory leaks when shrinking the first dimension
// Resize a matrix. Returns EXIT_SUCCESS or EXIT_FAILURE.
// Presently only resizing the last dimension is supported, due to
// potential memory leaks when shrinking the first dimension.
// Fix: the row counter is now int64_t to match the declared extents (the
// original int counter truncates for nrow > INT_MAX).
int matrix_realloc(void **matrix, size_t size, int64_t nrow, int64_t ncol){
    for (int64_t i = 0; i < nrow; i++) {
        void *tmp = my_realloc(matrix[i], size, ncol, "matrix_realloc");
        if (tmp == NULL) {
            /* rows [0, i) are already resized; caller still owns the matrix */
            return EXIT_FAILURE;
        }
        matrix[i] = tmp;
    }
    return EXIT_SUCCESS;
}
/* Frees a matrix created by matrix_malloc/matrix_calloc (each row, then the
   row-pointer array). A NULL matrix is a no-op.
   Fix: the row counter is now int64_t to match the declared extent. */
void matrix_free(void **m,int64_t nrow)
{
    if (m == NULL)
        return;
    for (int64_t i = 0; i < nrow; i++)
        free(m[i]);
    free(m);
}
/* Allocates an nrow x ncol array of nframe-element blocks of `size` bytes.
   Returns NULL on any failure, with everything allocated so far released.
   Fixes vs. the original error paths:
   - the per-row pointer arrays v[jj] were leaked (only their frame blocks
     were freed);
   - after a failed frame allocation, the cleanup walked ALL ncol slots of
     the current row, freeing uninitialised pointers (undefined behaviour) —
     it now frees only the frames actually allocated in that row;
   - loop counters are int64_t to match the declared extents. */
void *** volume_malloc(size_t size,int64_t nrow,int64_t ncol,int64_t nframe)
{
    void ***v = (void ***) my_malloc(sizeof(void **), nrow);
    if (v == NULL) {
        return NULL;
    }
    for (int64_t i = 0; i < nrow; i++) {
        v[i] = (void **) my_malloc(sizeof(void *), ncol);
        if (v[i] == NULL) {
            /* free the fully-allocated previous rows, arrays included */
            for (int64_t jj = i - 1; jj >= 0; jj--) {
                for (int64_t k = 0; k < ncol; k++) {
                    free(v[jj][k]);
                }
                free(v[jj]);
            }
            free(v);
            return NULL;
        }
        for (int64_t j = 0; j < ncol; j++) {
            v[i][j] = my_malloc(size, nframe);
            if (v[i][j] == NULL) {
                /* free only the frames already allocated in this row */
                for (int64_t k = j - 1; k >= 0; k--) {
                    free(v[i][k]);
                }
                free(v[i]);
                /* then all previous, fully-allocated rows */
                for (int64_t jj = i - 1; jj >= 0; jj--) {
                    for (int64_t k = 0; k < ncol; k++) {
                        free(v[jj][k]);
                    }
                    free(v[jj]);
                }
                free(v);
                return NULL;
            }
        }
    }
    return v;
}
/* Zero-initialised twin of volume_malloc: nrow x ncol array of
   nframe-element blocks allocated with my_calloc. Returns NULL on any
   failure, with everything allocated so far released.
   Fixes vs. the original error paths:
   - the cleanup freed v[j][k] instead of v[jj][k] (wrong index variable),
     freeing the wrong row's blocks;
   - the per-row pointer arrays v[jj] were leaked;
   - after a failed frame allocation, uninitialised slots of the current row
     were freed (undefined behaviour) — now only allocated frames are freed;
   - loop counters are int64_t to match the declared extents. */
void *** volume_calloc(size_t size,int64_t nrow,int64_t ncol,int64_t nframe)
{
    void ***v = (void ***) my_malloc(sizeof(void **), nrow);
    if (v == NULL) {
        return NULL;
    }
    for (int64_t i = 0; i < nrow; i++) {
        v[i] = (void **) my_malloc(sizeof(void *), ncol);
        if (v[i] == NULL) {
            /* free the fully-allocated previous rows, arrays included */
            for (int64_t jj = i - 1; jj >= 0; jj--) {
                for (int64_t k = 0; k < ncol; k++) {
                    free(v[jj][k]);
                }
                free(v[jj]);
            }
            free(v);
            return NULL;
        }
        for (int64_t j = 0; j < ncol; j++) {
            v[i][j] = my_calloc(size, nframe);
            if (v[i][j] == NULL) {
                /* free only the frames already allocated in this row */
                for (int64_t k = j - 1; k >= 0; k--) {
                    free(v[i][k]);
                }
                free(v[i]);
                /* then all previous, fully-allocated rows */
                for (int64_t jj = i - 1; jj >= 0; jj--) {
                    for (int64_t k = 0; k < ncol; k++) {
                        free(v[jj][k]);
                    }
                    free(v[jj]);
                }
                free(v);
                return NULL;
            }
        }
    }
    return v;
}
/* Frees a volume created by volume_malloc/volume_calloc: every frame block,
   each row's pointer array, then the top-level array.
   Fixes: a NULL volume is now a no-op (consistent with matrix_free), and the
   loop counters are int64_t to match the declared extents. */
void volume_free(void ***v,int64_t nrow,int64_t ncol)
{
    if (v == NULL)
        return;
    for (int64_t i = 0; i < nrow; i++) {
        for (int64_t j = 0; j < ncol; j++) {
            free(v[i][j]);
        }
        free(v[i]);
    }
    free(v);
}
/* Counts the lines of `fname` that are neither blank (whitespace only) nor
   start — after leading whitespace — with the `comment` character.
   Returns the count, or -1 when the file cannot be opened. */
int64_t getnumlines(const char *fname, const char comment)
{
    const int MAXLINESIZE = 10000;
    char str_line[MAXLINESIZE];
    FILE *fp = my_fopen(fname, "rt");
    if (fp == NULL) {
        return -1;
    }
    int64_t nlines = 0;
    /* fgets always NUL-terminates on a successful read */
    while (fgets(str_line, MAXLINESIZE, fp) != NULL) {
        /* skip leading whitespace */
        const char *c = str_line;
        while (*c != '\0' && isspace((unsigned char)*c)) {
            c++;
        }
        if (*c != '\0' && *c != comment) {
            nlines++;
        }
    }
    fclose(fp);
    return nlines;
}
/* int float_almost_equal(const float A, const float B, int maxUlps) */
/* { */
/* /\* MS -- taken from */
/* http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm */
/* *\/ */
/* const int upper_limit_maxulps = 4 * 1024 * 1024; */
/* /\* Make sure maxUlps is non-negative and small enough that the */
/* default NAN won't compare as equal to anything.*\/ */
/* if(maxUlps <= 0 || maxUlps >= upper_limit_maxulps){ */
/* fprintf(stderr,"Error: Comparison between floats should have smaller number of max. units in last place. Found maxUlps = %d (max allowed = %d)\n", */
/* maxUlps, upper_limit_maxulps); */
/* return EXIT_FAILURE; */
/* } */
/* int aInt = *(int*)&A; */
/* /\* Make aInt lexicographically ordered as a twos-complement int*\/ */
/* if (aInt < 0) */
/* aInt = 0x80000000 - aInt; */
/* /\* Make bInt lexicographically ordered as a twos-complement int*\/ */
/* int bInt = *(int*)&B; */
/* if (bInt < 0) */
/* bInt = 0x80000000 - bInt; */
/* int intDiff = abs(aInt - bInt); */
/* if (intDiff <= maxUlps) */
/* return 1; */
/* return 0; */
/* } */
/* Directly taken from https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ */
/* Directly taken from https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
   Returns EXIT_SUCCESS when A and B are within maxDiff absolutely (handles
   values near zero) or within maxRelDiff of the larger magnitude;
   EXIT_FAILURE otherwise. */
int AlmostEqualRelativeAndAbs_float(float A, float B,
                                    const float maxDiff,
                                    const float maxRelDiff)
{
    /* absolute tolerance first — needed when comparing numbers near zero */
    const float diff = fabsf(A - B);
    if (diff <= maxDiff) {
        return EXIT_SUCCESS;
    }
    /* otherwise compare relative to the larger magnitude */
    const float absA = fabsf(A);
    const float absB = fabsf(B);
    const float largest = (absB > absA) ? absB : absA;
    return (diff <= largest * maxRelDiff) ? EXIT_SUCCESS : EXIT_FAILURE;
}
/* Double-precision twin of AlmostEqualRelativeAndAbs_float: EXIT_SUCCESS when
   A and B agree within maxDiff absolutely (covers values near zero) or within
   maxRelDiff of the larger magnitude; EXIT_FAILURE otherwise. */
int AlmostEqualRelativeAndAbs_double(double A, double B,
                                     const double maxDiff,
                                     const double maxRelDiff)
{
    /* absolute tolerance first — needed when comparing numbers near zero */
    const double diff = fabs(A - B);
    if (diff <= maxDiff) {
        return EXIT_SUCCESS;
    }
    /* otherwise compare relative to the larger magnitude */
    const double absA = fabs(A);
    const double absB = fabs(B);
    const double largest = (absB > absA) ? absB : absA;
    return (diff <= largest * maxRelDiff) ? EXIT_SUCCESS : EXIT_FAILURE;
}
/* #undef __USE_XOPEN2K */
/* A parallel cumulative sum
Output convention is: cumsum[0] = 0; cumsum[N-1] = sum(a[0:N-1]);
The algorithm is:
- Divide the array into `nthreads` chunks
- cumsum within each chunk
- compute the "offset" for each chunk by summing the cumsum at the tail of all previous chunks
- apply the offset
*/
void parallel_cumsum(const int64_t *a, const int64_t N, int64_t *cumsum){
// Exclusive prefix sum: cumsum[0] = 0 and cumsum[i] = a[0] + ... + a[i-1].
if (N <= 0){
return; // nothing to do
}
#ifdef _OPENMP
int nthreads = omp_get_max_threads();
#else
int nthreads = 1;
#endif
// We will heuristically limit the number of threads
// if there isn't enough work for multithreading to be efficient.
// This is also important for the correctness of the algorithm below,
// since it enforces nthreads <= N
int64_t min_N_per_thread = 10000;
if(N/min_N_per_thread < nthreads){
nthreads = N/min_N_per_thread;
}
if(nthreads < 1){
nthreads = 1;
}
#ifdef _OPENMP
#pragma omp parallel num_threads(nthreads)
#endif
{
#ifdef _OPENMP
int tid = omp_get_thread_num();
#else
int tid = 0;
#endif
// Each thread owns the half-open chunk [cstart, cend) of the output.
int64_t cstart = N*tid/nthreads;
int64_t cend = N*(tid+1)/nthreads;
// Pass 1: chunk-local exclusive cumsum, seeded with a[cstart-1] so the
// tail element of each chunk ends up holding that chunk's inclusive total.
cumsum[cstart] = cstart > 0 ? a[cstart-1] : 0;
for(int64_t c = cstart+1; c < cend; c++){
cumsum[c] = a[c-1] + cumsum[c-1];
}
#ifdef _OPENMP
#pragma omp barrier
#endif
// Pass 2: this chunk's offset is the sum of the tails of all preceding
// chunks; the barrier above guarantees those tails are final.
int64_t offset = 0;
for(int t = 0; t < tid; t++){
offset += cumsum[N*(t+1)/nthreads-1];
}
#ifdef _OPENMP
#pragma omp barrier
#endif
// Second barrier: every thread must have READ the tails before any thread
// starts overwriting them by applying its offset below.
if(offset != 0){
for(int64_t c = cstart; c < cend; c++){
cumsum[c] += offset;
}
}
}
}
|
additionally.c | #include <unistd.h>
#include "additionally.h"
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
// global GPU index: cuda.c
int gpu_index = 0;
// im2col.c
// im2col.c
// Fetches pixel (row, col) of `channel` from a CHW image after shifting by
// the padding; coordinates that fall in the padded border yield 0.
// (`channels` is part of the signature but not needed for the address math.)
float im2col_get_pixel(float *im, int height, int width, int channels,
    int row, int col, int channel, int pad)
{
    const int r = row - pad;
    const int c = col - pad;
    const int inside = (r >= 0) && (c >= 0) && (r < height) && (c < width);
    if (!inside) {
        return 0;
    }
    return im[c + width*(r + height*channel)];
}
// im2col.c
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// im2col.c
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Unrolls a CHW image into the im2col matrix: one output row per
// (channel, kernel-offset) pair, one column per output pixel. The padded
// border reads as 0 (the bounds check is inlined here instead of calling
// im2col_get_pixel; the arithmetic is identical).
void im2col_cpu(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col)
{
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    for (int c = 0; c < channels_col; ++c) {
        const int w_offset = c % ksize;
        const int h_offset = (c / ksize) % ksize;
        const int c_im = c / ksize / ksize;
        for (int h = 0; h < height_col; ++h) {
            for (int w = 0; w < width_col; ++w) {
                const int im_row = h_offset + h * stride - pad;
                const int im_col = w_offset + w * stride - pad;
                const int col_index = (c * height_col + h) * width_col + w;
                float pixel = 0;
                if (im_row >= 0 && im_col >= 0 && im_row < height && im_col < width) {
                    pixel = data_im[im_col + width*(im_row + height*c_im)];
                }
                data_col[col_index] = pixel;
            }
        }
    }
}
// fuse convolutional and batch_norm weights into one convolutional-layer
// Folds the batch-norm statistics of every convolutional layer into that
// layer's biases and weights (so inference can skip the BN step), then
// clears the layer's batch_normalize flag.
void yolov2_fuse_conv_batchnorm(network net)
{
    for (int j = 0; j < net.n; ++j) {
        layer *l = &net.layers[j];
        if (l->type != CONVOLUTIONAL) continue;
        if (!l->batch_normalize) continue;
        for (int f = 0; f < l->n; ++f)
        {
            // fold mean/variance/scale into the bias of filter f
            l->biases[f] = l->biases[f] - l->scales[f] * l->rolling_mean[f] / (sqrtf(l->rolling_variance[f]) + .000001f);
            const size_t filter_size = l->size*l->size*l->c;
            // rescale every weight of filter f by scale / sigma
            for (size_t i = 0; i < filter_size; ++i) {
                const size_t w_index = f*filter_size + i;
                l->weights[w_index] = l->weights[w_index] * l->scales[f] / (sqrtf(l->rolling_variance[f]) + .000001f);
            }
        }
        l->batch_normalize = 0;
    }
}
/* Logical XNOR: 1 when the two values are equal, 0 otherwise
   (equivalent to !(a ^ b) for integer operands). */
static inline unsigned char xnor(unsigned char a, unsigned char b) {
    return (a == b) ? 1 : 0;
}
// INT-32
// INT-32
/* Extracts bit `index` from a packed array of 32-bit words (bit 0 is the
   least-significant bit of src[0]).
   Fix: the original built the mask with (1 << src_shift); for src_shift == 31
   that shifts into the sign bit of a signed int, which is undefined
   behaviour. Shifting the unsigned word right instead is well defined. */
static inline uint32_t get_bit_int32(uint32_t const*const src, size_t index) {
    const size_t src_i = index / 32;
    const int src_shift = index % 32;
    return (src[src_i] >> src_shift) & 1U;
}
/* Bitwise XNOR of two 32-bit words: result bit is 1 where a and b agree. */
static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
    return ~a ^ b;
}
/* Bitwise XNOR of two 64-bit words: result bit is 1 where a and b agree. */
static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
    return ~a ^ b;
}
/* Broadcasts a single bit flag to a full 32-bit mask: 0 -> all zeros,
   non-zero -> all ones. */
static inline uint32_t fill_bit_int32(char src) {
    return src ? 0xFFFFFFFFu : 0x00000000u;
}
/* Broadcasts a single bit flag to a full 64-bit mask: 0 -> all zeros,
   non-zero -> all ones. */
static inline uint64_t fill_bit_int64(char src) {
    return src ? 0xFFFFFFFFFFFFFFFFULL : 0x0000000000000000ULL;
}
/* Prints the 32 bits of src, least-significant bit first, then a newline. */
void binary_int32_printf(uint32_t src) {
    for (int i = 0; i < 32; ++i) {
        putchar(((src >> i) & 1u) ? '1' : '0');
    }
    printf("\n");
}
/* Prints the 64 bits of src, least-significant bit first, then a newline. */
void binary_int64_printf(uint64_t src) {
    for (int i = 0; i < 64; ++i) {
        putchar(((src >> i) & 1u) ? '1' : '0');
    }
    printf("\n");
}
/* Fills mean_arr with one value per filter: the absolute value of the FIRST
   element of each filter's block in src (blocks are size/filters long).
   NOTE: despite the name, no averaging is performed here — only |src[i]| at
   each block start is taken, matching the original. */
void get_mean_array(float *src, size_t size, size_t filters, float *mean_arr) {
    const size_t block = size / filters;
    size_t out = 0;
    for (size_t i = 0; i < size; i += block) {
        mean_arr[out] = fabs(src[i]);
        ++out;
    }
}
// Intentional no-op forward pass — presumably registered for layers that do
// no computation of their own; TODO confirm against the layer setup code.
void forward_blank_layer(layer l, network_state state) {}
/* Sets bit `index` in a byte-packed bitmap (bit 0 is the least-significant
   bit of dst[0]). */
static inline void set_bit(unsigned char *const dst, size_t index) {
    dst[index >> 3] |= (unsigned char)(1u << (index & 7u));
}
/* Reads bit `index` from a byte-packed bitmap (bit 0 is the least-significant
   bit of src[0]); returns 0 or 1. */
static inline unsigned char get_bit(unsigned char const*const src, size_t index) {
    return (unsigned char)((src[index >> 3] >> (index & 7u)) & 1u);
}
/* Reverses the bit order of a byte (bit 0 <-> bit 7, etc.) with a plain
   shift-accumulate loop instead of the multiply/mask trick. */
uint8_t reverse_8_bit(uint8_t a) {
    uint8_t reversed = 0;
    for (int i = 0; i < 8; ++i) {
        reversed = (uint8_t)((reversed << 1) | ((a >> i) & 1u));
    }
    return reversed;
}
/* Reverses the bit order of a 32-bit word (bit 0 <-> bit 31, etc.).
   Implemented as a self-contained shift-accumulate loop rather than four
   byte reversals; ARM could use the single rbit instruction instead. */
uint32_t reverse_32_bit(uint32_t a)
{
    uint32_t reversed = 0;
    for (int i = 0; i < 32; ++i) {
        reversed = (reversed << 1) | ((a >> i) & 1u);
    }
    return reversed;
}
#define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j);
void transpose32_optimized(uint32_t A[32]) {
// In-place transpose of a 32x32 bit matrix (each of the 32 words of A is one
// row). Uses the classic Hacker's Delight block-swap: at step j the matrix is
// treated as 2x2 super-blocks of size j and the off-diagonal blocks are
// exchanged via the `swap` macro defined above, for j = 16, 8, 4, 2, 1 with
// the matching bit masks. The commented-out loop below is the generic form
// that the unrolled steps replace.
int j, k;
unsigned m, t;
//m = 0x0000FFFF;
//for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) {
// for (k = 0; k < 32; k = (k + j + 1) & ~j) {
// t = (A[k] ^ (A[k + j] >> j)) & m;
// A[k] = A[k] ^ t;
// A[k + j] = A[k + j] ^ (t << j);
// }
//}
j = 16;
m = 0x0000FFFF;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 8;
m = 0x00ff00ff;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 4;
m = 0x0f0f0f0f;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 2;
m = 0x33333333;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 1;
m = 0x55555555;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
// reverse Y
// Final pass: swap rows end-for-end AND bit-reverse each word, yielding the
// "reversed diagonale" layout that the caller below expects.
for (j = 0; j < 16; ++j) {
uint32_t tmp = A[j];
A[j] = reverse_32_bit(A[31 - j]);
A[31 - j] = reverse_32_bit(tmp);
}
}
/* Transposes one 32x32-bit tile: gathers 32 words from A with a stride of m
   words, transposes them in place via transpose32_optimized, and scatters the
   result into B with a stride of n words. */
void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
    uint32_t tile[32];
    for (int i = 0; i < 32; ++i) {
        tile[i] = A[i * m];
    }
    transpose32_optimized(tile);
    for (int i = 0; i < 32; ++i) {
        B[i * n] = tile[i];
    }
}
// transpose by 32-bit
void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
// Transposes an n x m bit matrix A into B, working in 32x32-bit tiles.
// lda/ldb are the row strides of A/B in BITS; block_size is accepted but
// never used. NOTE(review): both loops advance in whole 32-bit tiles, so n
// and m are presumably multiples of 32 (the bit-aligned sizes used by the
// callers) — otherwise the tile routine touches memory past the logical
// edge. Verify at call sites.
int i;
#pragma omp parallel for
for (i = 0; i < n; i += 32) {
int j;
for (j = 0; j < m; j += 32) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
//transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32);
}
// NOTE(review): dead code — the tiled loop above only exits once j >= m,
// so this single-bit remainder loop can never execute.
for (; j < m; ++j) {
if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
}
}
}
// popcnt 32 bit
// popcnt 32 bit
/* Number of set bits in val32, via the compiler intrinsic available on the
   current toolchain. */
static inline int popcnt_32(uint32_t val32) {
#ifdef WIN32 // Windows MSVS
    return (int)__popcnt(val32);
#else // Linux GCC
    return __builtin_popcount(val32);
#endif
}
/* Binary GEMM on 32-bit packed operands: C[i][j] += sum over K words of the
   XNOR-popcount dot product, scaled by mean_arr[i]. Each word contributes
   (2*count - 32)*mean, i.e. +mean per matching bit pair and -mean per
   differing pair. B is already transposed (rows of length ldb words).
   ALPHA is part of the signature but unused. (The deprecated `register`
   qualifiers of the original were dropped — they had no effect.) */
void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) { // l.n
        const float mean_val = mean_arr[i];
        for (int j = 0; j < N; ++j) // out_h*out_w
        {
            float acc = 0;
            for (int s = 0; s < K; ++s) // packed l.size*l.size*l.c words
            {
                const uint32_t a_bits = A[i*lda + s];
                const uint32_t b_bits = B[j*ldb + s];
                const int32_t count = popcnt_32(~(a_bits ^ b_bits)); // signed on purpose
                acc += (2 * count - 32) * mean_val;
            }
            C[i*ldc + j] += acc;
        }
    }
}
// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
// Regroups a CHW tensor so that each group of 32 consecutive channels is
// interleaved per pixel: output index (chan_group, pixel, channel-in-group).
void repack_input(float *input, float *re_packed_input, int w, int h, int c)
{
    const int items_per_channel = w * h;
    for (int chan = 0; chan < c; chan += 32)
    {
        for (int i = 0; i < items_per_channel; ++i)
        {
            for (int c_pack = 0; c_pack < 32; ++c_pack)
            {
                re_packed_input[chan*items_per_channel + i * 32 + c_pack] =
                    input[(chan + c_pack)*items_per_channel + i];
            }
        }
    }
}
// transpose uint32_t matrix
// Transpose a src_h x src_w matrix of 32-bit words. Strides are
// asymmetric: src rows are src_align words apart, dst rows are
// dst_align BITS apart (dst_align/32 words).
void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
    //l.bit_align - algined (n) by 32
    //new_ldb - aligned (k) by 256
    int r;
    for (r = 0; r < src_h; ++r)       // l.size*l.size*l.c
    {
        int col;
        for (col = 0; col < src_w; ++col)   // out_h*out_w
        {
            dst[col*dst_align / 32 + r] = src[r*src_align + col];
        }
    }
}
// convolution repacked bit matrix (32 channels -> 1 uint32_t) XNOR-net
// Direct binary convolution: packed_input holds 32 input channels per
// uint32_t (c/32 channel-groups of w*h words); packed_weights holds one
// filter per new_lda-bit row (size*size words per channel-group).
// Each output accumulates (2*popcount(xnor) - 32) * mean_arr[fil] over
// the filter window; output[] is ACCUMULATED into (+=), not overwritten.
void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output,
    int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr)
{
    int fil;
    // filter index
    #pragma omp parallel for
    for (fil = 0; fil < n; ++fil) {
        float mean_val = mean_arr[fil];   // per-filter scale (binarization mean)
        int chan, c_pack, y, x, f_y, f_x; // NOTE(review): c_pack is unused here
        // channel index (one iteration = 32 packed input channels)
        for (chan = 0; chan < c / 32; ++chan)
        //for (chan = 0; chan < l.c; chan += 32)
        //for (c_pack = 0; c_pack < 32; ++c_pack)
        // input - y
        for (y = 0; y < h; ++y)
        // input - x
        for (x = 0; x < w; ++x)
        {
            int const output_index = fil*w*h + y*w + x;
            float sum = 0;
            // filter - y
            for (f_y = 0; f_y < size; ++f_y)
            {
                int input_y = y + f_y - pad;
                // filter - x
                for (f_x = 0; f_x < size; ++f_x)
                {
                    int input_x = x + f_x - pad;
                    // zero-padding: taps outside the image contribute nothing
                    if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
                    // normal
                    //float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x];
                    //float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x];
                    // packed
                    //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
                    //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
                    //sum += input * weight;
                    //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
                    //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
                    //uint32_t bit1 = input > 0;
                    //uint32_t bit2 = weight > 0;
                    //uint32_t count = (~(bit1 ^ bit2)) & 1;
                    //float result = (2 * (float)count - 1) * mean_val;
                    //printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result);
                    //sum += result;
                    uint32_t input = ((uint32_t *)packed_input)[chan*w*h + input_y*w + input_x];
                    //uint32_t weight = ((uint32_t *)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x];
                    uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x];
                    // 32 channel pairs at once: matches - mismatches = 2*popcount - 32
                    uint32_t xnor_result = ~(input ^ weight);
                    int32_t count = popcnt_32(xnor_result); // mandatory Signed int
                    sum += (2 * count - 32) * mean_val;
                }
            }
            // l.output[filters][width][height] +=
            //        state.input[channels][width][height] *
            //        l.weights[filters][channels][filter_width][filter_height];
            output[output_index] += sum;
        }
    }
}
// -------------- blas.c --------------
#ifdef AVX
#ifdef _WIN64
// Windows
#include <intrin.h>
#else
// Linux
#include <x86intrin.h>
#endif
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <emmintrin.h>
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=broad&expand=561
// https://software.intel.com/sites/landingpage/IntrinsicsGuide
// AVX GEMM: C[M,N] += ALPHA * A[M,K] * B[K,N], row-major with leading
// dimensions lda/ldb/ldc. i-k-j loop order keeps B and C accesses
// sequential; the inner loop handles 8 floats per step with unaligned
// AVX loads/stores, then a scalar loop finishes the row.
void gemm_nn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            float A_PART = ALPHA*A[i*lda + k];
            __m256 a256, b256, c256, result256;    // AVX
            a256 = _mm256_set1_ps(A_PART);         // broadcast ALPHA*A[i][k]
            for (j = 0; j < N - 8; j += 8) {
                b256 = _mm256_loadu_ps(&B[k*ldb + j]);
                c256 = _mm256_loadu_ps(&C[i*ldc + j]);
                // FMA - Intel Haswell (2013), AMD Piledriver (2012)
                //result256 = _mm256_fmadd_ps(a256, b256, c256);
                result256 = _mm256_mul_ps(a256, b256);
                result256 = _mm256_add_ps(result256, c256);
                _mm256_storeu_ps(&C[i*ldc + j], result256);
            }
            // scalar tail: when N is a multiple of 8 the vector loop stops
            // one full group early (j < N-8), so the tail starts at N-8;
            // otherwise it starts at the last full group boundary.
            int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8;
            for (j = prev_end; j < N; ++j)
                C[i*ldc + j] += A_PART*B[k*ldb + j];
        }
    }
}
// AVX XNOR-GEMM on 32-bit packed rows, 8 output columns per step:
// broadcast one A word, XNOR against 8 B words, popcount every 32-bit
// lane via scalar extracts (AVX2 has no vector popcount), then
// accumulate (2*count - 32) * mean_arr[i] into C.
// NOTE(review): B and C are accessed through direct __m256i/__m256
// dereferences, which require 32-byte alignment — confirm the buffers
// are allocated aligned.
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) { // l.n
        int j, s;
        float mean_val = mean_arr[i];
        for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
        {
            register uint32_t A_PART = A[i*lda + s];
            __m256i a256 = _mm256_set1_epi32(A_PART);
            for (j = 0; j < N - 8; j += 8)
            {
                __m256i b256 = *((__m256i*)&B[s*ldb + j]);
                __m256i xor256 = _mm256_xor_si256(a256, b256); // xnor = xor(a,b)
                __m256i all_1 = _mm256_set1_epi8(255);
                __m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b))
                // waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a)
                __m256 count = _mm256_setr_ps(
                    popcnt_32(_mm256_extract_epi32(xnor256, 0)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 1)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 2)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 3)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 4)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 5)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 6)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 7)));
                __m256 val2 = _mm256_set1_ps(2);
                count = _mm256_mul_ps(count, val2); // count * 2
                __m256 val32 = _mm256_set1_ps(32);
                count = _mm256_sub_ps(count, val32); // count - 32
                __m256 mean256 = _mm256_set1_ps(mean_val);
                count = _mm256_mul_ps(count, mean256); // count * mean_val
                __m256 c256 = *((__m256*)&C[i*ldc + j]);
                count = _mm256_add_ps(count, c256); // c = c + count
                *((__m256*)&C[i*ldc + j]) = count;
            }
            // scalar tail for the remaining output columns
            for (; j < N; ++j) // out_h*out_w;
            {
                register uint32_t B_PART = B[s*ldb + j];
                uint32_t xnor_result = ~(A_PART ^ B_PART);
                int32_t count = popcnt_32(xnor_result); // must be Signed int
                C[i*ldc + j] += (2 * count - 32) * mean_val;
            }
        }
    }
}
#if defined(_MSC_VER) && _MSC_VER <= 1900
// Shims for AVX2 lane-extract intrinsics missing from older MSVC (<= VS2015),
// implemented via the MSVC vector-union members.
// BUGFIX: the 64-bit extract must return __int64 — the old __int32 return
// type silently truncated the popcount sums accumulated with
// _mm256_add_epi64 before they were added up.
static inline __int64 _mm256_extract_epi64(__m256i a, const int index) {
    return a.m256i_i64[index];
}
static inline __int32 _mm256_extract_epi32(__m256i a, const int index) {
    return a.m256i_i32[index];
}
#endif
// Reinterpret the bits of a 32-bit unsigned integer as an IEEE-754 float.
// BUGFIX: uses memcpy instead of dereferencing a casted pointer, which
// violated strict aliasing (undefined behavior under optimization).
static inline float _castu32_f32(uint32_t a) {
    float f;
    memcpy(&f, &a, sizeof(f));
    return f;
}
#if defined(_MSC_VER)
// Windows
// Extract float lane `index` from a 256-bit register via the MSVC
// vector-union member.
static inline float _mm256_extract_float32(__m256 a, const int index) {
    return a.m256_f32[index];
}
#else
// Linux
// Extract float lane `index` by pulling the 32-bit integer lane and
// reinterpreting its bits as a float.
// NOTE(review): gcc's _mm256_extract_epi32 requires a compile-time
// constant lane index; this presumably only works when inlined with a
// literal argument — confirm at call sites.
static inline float _mm256_extract_float32(__m256 a, const int index) {
    return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), index));
}
#endif
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// im2col with an AVX fast path for the common size-preserving case
// (stride 1, pad 1, output spatial size == input): the interior of each
// output row is a straight 8-float vector copy, and the four image
// borders are filled with the bounds-checked im2col_get_pixel().
// Any other configuration falls back to the reference im2col_cpu().
void im2col_cpu_custom(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1)// && is_fma_avx())
    {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;            // kernel x offset for this output row
            int h_offset = (c / ksize) % ksize;  // kernel y offset
            int c_im = c / ksize / ksize;        // source image channel
            // interior: in-bounds by construction, plain copies
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;
                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    _mm256_storeu_ps(&data_col[col_index], src256);
                }
                // scalar tail of the interior row
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }
            // left border column (w == 0), bounds-checked sampling
            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            // right border column (w == width_col - 1)
            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            // top border row (h == 0)
            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            // bottom border row (h == height_col - 1)
            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        //printf("\n Error: is no non-optimized version \n");
        im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
    }
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// AVX im2col that binarizes on the fly: bit col_index of data_col is set
// iff the sampled pixel is > 0. Output rows are bit_align bits apart so
// each row starts word-aligned for the later bit transpose. Only the
// stride-1/pad-1 size-preserving case is implemented; other configs fall
// through to an error message (see else branch).
void im2col_cpu_custom_bin(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1)
    {
        //__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
        __m256 float_zero256 = _mm256_set1_ps(0.00);
        int new_ldb = bit_align;   // output row stride in bits
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            // interior: compare 8 floats against zero, movemask gives 8 sign bits
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
                    //uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
                    //mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
                    uint16_t mask = _mm256_movemask_ps(result256); // bit = (val > 0) ? 1 : 0
                    // NOTE(review): unsigned char* address assigned to uint16_t*
                    // (incompatible-pointer warning), and the 16-bit RMW below can
                    // touch the byte after the 8 mask bits — presumably covered by
                    // bit_align row padding; confirm. Unaligned 16-bit access.
                    uint16_t *dst_ptr = &((unsigned char*)data_col)[col_index / 8];
                    *dst_ptr |= (mask << (col_index % 8));
                }
                // scalar tail of the interior row
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit(data_col, col_index);
                }
            }
            // left border column (w == 0), bounds-checked sampling
            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit(data_col, col_index);
                }
            }
            // right border column (w == width_col - 1)
            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit(data_col, col_index);
                }
            }
            // top border row (h == 0)
            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit(data_col, col_index);
                }
            }
            // bottom border row (h == height_col - 1)
            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit(data_col, col_index);
                }
            }
        }
    }
    else {
        printf("\n Error: is no non-optimized version \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}
// Elementwise activation with an AVX fast path for LEAKY
// (x > 0 ? x : 0.1*x, selected from the float sign bit with blendv).
// LINEAR is the identity and is skipped; all other activations fall
// back to scalar activate().
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int i = 0;
    if (a == LINEAR)
    {
        // identity: nothing to do
    }
    else if (a == LEAKY)
    {
        {
            __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
            __m256 all256_01 = _mm256_set1_ps(0.1F);
            for (i = 0; i < n - 8; i += 8) {
                //x[i] = (x[i]>0) ? x[i] : .1*x[i];
                __m256 src256 = _mm256_loadu_ps(&x[i]);
                __m256 mult256 = _mm256_mul_ps((src256), all256_01); // mult * 0.1
                __m256i sign256 = _mm256_and_si256(_mm256_castps_si256(src256), all256_sing1); // check sign in 8 x 32-bit floats
                // blendv picks mult256 in lanes whose sign bit is set (x < 0)
                __m256 result256 = _mm256_blendv_ps(src256, mult256, _mm256_castsi256_ps(sign256)); // (sign>0) ? src : mult;
                _mm256_storeu_ps(&x[i], result256);
            }
        }
        // scalar tail (also handles the whole array when n <= 8)
        for (; i < n; ++i) {
            x[i] = (x[i]>0) ? x[i] : .1*x[i];
        }
    }
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}
// Maxpool forward pass with AVX fast paths:
// - stride == 1: computes 8 outputs at a time, each SIMD lane taking the
//   max over the size x size window.
// - size == 2 && stride == 2: computes 4 outputs per iteration with
//   permute/shuffle lane reductions.
// - a scalar loop handles the remaining columns and records the argmax
//   position in indexes[].
// NOTE(review): the two vector paths never write indexes[], so this
// presumably targets inference only (backprop needs the indexes) —
// confirm before reusing for training.
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
    int pad, int stride, int batch)
{
    const int w_offset = -pad / 2;   // window origin shift from padding
    const int h_offset = -pad / 2;
    int b, k;
    for (b = 0; b < batch; ++b) {
        #pragma omp parallel for
        for (k = 0; k < c; ++k) {
            int i, j, m, n;
            for (i = 0; i < out_h; ++i) {
                //for (j = 0; j < out_w; ++j) {
                j = 0;
                if (stride == 1) {
                    // 8 outputs per step; the "- (size - 1)" keeps the 8-wide
                    // loads of the rightmost window taps in bounds
                    for (j = 0; j < out_w - 8 - (size - 1); j += 8) {
                        int out_index = j + out_w*(i + out_h*(k + c*b));
                        __m256 max256 = _mm256_set1_ps(-FLT_MAX);
                        for (n = 0; n < size; ++n) {
                            for (m = 0; m < size; ++m) {
                                int cur_h = h_offset + i*stride + n;
                                int cur_w = w_offset + j*stride + m;
                                int index = cur_w + w*(cur_h + h*(k + b*c));
                                int valid = (cur_h >= 0 && cur_h < h &&
                                    cur_w >= 0 && cur_w < w);
                                if (!valid) continue;
                                __m256 src256 = _mm256_loadu_ps(&src[index]);
                                max256 = _mm256_max_ps(src256, max256);
                            }
                        }
                        _mm256_storeu_ps(&dst[out_index], max256);
                    }
                }
                else if (size == 2 && stride == 2) {
                    // 4 outputs per step from 8 input floats: max adjacent
                    // pairs, then compact the pair-maxima into one __m128
                    for (j = 0; j < out_w - 4; j += 4) {
                        int out_index = j + out_w*(i + out_h*(k + c*b));
                        float max = -FLT_MAX;
                        int max_i = -1;
                        __m128 max128 = _mm_set1_ps(-FLT_MAX);
                        for (n = 0; n < size; ++n) {
                            //for (m = 0; m < size; ++m)
                            m = 0;
                            {
                                int cur_h = h_offset + i*stride + n;
                                int cur_w = w_offset + j*stride + m;
                                int index = cur_w + w*(cur_h + h*(k + b*c));
                                int valid = (cur_h >= 0 && cur_h < h &&
                                    cur_w >= 0 && cur_w < w);
                                if (!valid) continue;
                                __m256 src256 = _mm256_loadu_ps(&src[index]);
                                __m256 src256_2 = _mm256_permute_ps(src256, (1 << 0) | (3 << 4));
                                __m256 max256 = _mm256_max_ps(src256, src256_2);
                                __m128 src128_0 = _mm256_extractf128_ps(max256, 0);
                                __m128 src128_1 = _mm256_extractf128_ps(max256, 1);
                                __m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6));
                                max128 = _mm_max_ps(src128, max128);
                            }
                        }
                        _mm_storeu_ps(&dst[out_index], max128);
                    }
                }
                // scalar tail: remaining columns, with argmax bookkeeping
                for (; j < out_w; ++j) {
                    int out_index = j + out_w*(i + out_h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for (n = 0; n < size; ++n) {
                        for (m = 0; m < size; ++m) {
                            int cur_h = h_offset + i*stride + n;
                            int cur_w = w_offset + j*stride + m;
                            int index = cur_w + w*(cur_h + h*(k + b*c));
                            int valid = (cur_h >= 0 && cur_h < h &&
                                cur_w >= 0 && cur_w < w);
                            // out-of-bounds taps never win
                            float val = (valid != 0) ? src[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ? val : max;
                        }
                    }
                    dst[out_index] = max;
                    indexes[out_index] = max_i;
                }
            }
        }
    }
}
// http://graphics.stanford.edu/~seander/bithacks.html
// https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register
// https://arxiv.org/pdf/1611.07612.pdf
// Popcount of a 128-bit register: hardware-popcount each 64-bit half and
// sum. (__popcntq is presumably the gcc/clang POPCNT intrinsic alias and
// needs a POPCNT-capable target — confirm build flags.)
static inline int popcnt128(__m128i n) {
    const __m128i n_hi = _mm_unpackhi_epi64(n, n);   // move high half into lane 0
#ifdef _MSC_VER
    return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi));
#else
    return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi));
#endif
}
// Total number of set bits in a 256-bit register: popcount of each
// 128-bit half, summed.
static inline int popcnt256(__m256i n) {
    const __m128i lo = _mm256_extractf128_si256(n, 0);
    const __m128i hi = _mm256_extractf128_si256(n, 1);
    return popcnt128(lo) + popcnt128(hi);
}
// Per-64-bit-lane popcount of a 256-bit register (Mula's algorithm):
// each byte is split into two nibbles, both looked up in a 16-entry
// popcount table via pshufb, and the per-byte sums are reduced to the
// four 64-bit lanes with _mm256_sad_epu8 against zero.
static inline __m256i count256(__m256i v) {
    // popcount LUT for nibble values 0..15, replicated in both 128-bit lanes
    __m256i lookup =
        _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2,
            2, 3, 2, 3, 3, 4, 0, 1, 1, 2, 1, 2, 2, 3,
            1, 2, 2, 3, 2, 3, 3, 4);
    __m256i low_mask = _mm256_set1_epi8(0x0f);
    __m256i lo = _mm256_and_si256(v, low_mask);                      // low nibbles
    __m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask); // high nibbles
    __m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
    __m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);
    __m256i total = _mm256_add_epi8(popcnt1, popcnt2);               // per-byte counts (max 8)
    return _mm256_sad_epu8(total, _mm256_setzero_si256());           // 4 x 64-bit lane sums
}
// Horizontal popcount of a 256-bit register via the vectorized nibble
// lookup (count256), summing its four 64-bit partial counts.
static inline int popcnt256_custom(__m256i n) {
    __m256i partial = count256(n);
    int total = 0;
    total += (int)_mm256_extract_epi64(partial, 0);
    total += (int)_mm256_extract_epi64(partial, 1);
    total += (int)_mm256_extract_epi64(partial, 2);
    total += (int)_mm256_extract_epi64(partial, 3);
    return total;
}
// 5x times faster than gemm()-float32
// further optimizations: do mean-mult only for the last layer
// AVX XNOR-GEMM on bit-packed rows with B transposed, 256 bits per step:
// C[i,j] = (2*popcount(xnor(A_row_i, B_row_j)) - K) * mean_arr[i].
// lda/ldb are row strides in BITS; rows must be padded so 256-bit loads
// past K stay in bounds. The padding's spurious xnor matches are removed
// via f1 below, which presumably requires the pad bits of A and B to be
// equal — confirm the packing code zeroes both.
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
#if defined(_OPENMP)
    // NOTE(review): unsynchronized lazy init — benign if
    // omp_get_max_threads() is constant, but racy by the letter of the spec.
    static int max_num_threads = 0;
    if (max_num_threads == 0) {
        max_num_threads = omp_get_max_threads();
        //omp_set_num_threads(max_num_threads / 2);
    }
#endif
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i)
    { // l.n - filters [16 - 55 - 1024]
        float mean_val = mean_arr[i];
        int j, k;
        __m256i all_1 = _mm256_set1_epi8(255);
        for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
            int count = 0;
            const int bit_step = 256;
            __m256i count_sum = _mm256_set1_epi8(0);
            for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
                __m256i a_bit256 = _mm256_loadu_si256((__m256i *)(A + (i*lda + k) / 8));
                __m256i b_bit256 = _mm256_loadu_si256((__m256i *)(B + (j*ldb + k) / 8));
                __m256i xor256 = _mm256_xor_si256(a_bit256, b_bit256); // xnor = not(xor(a,b))
                __m256i c_bit256 = _mm256_andnot_si256(xor256, all_1); // can be optimized - we can do other NOT for wegihts once and do not do this NOT
                count_sum = _mm256_add_epi64(count256(c_bit256), count_sum); // Mula's algorithm
                //count += popcnt256(c_bit256);
                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }
            // count of 1 bits: horizontal sum of the four 64-bit lanes
            //count = count_sum.m256i_i64[0] +
            //    count_sum.m256i_i64[1] +
            //    count_sum.m256i_i64[2] +
            //    count_sum.m256i_i64[3];
            count = _mm256_extract_epi64(count_sum, 0)
                + _mm256_extract_epi64(count_sum, 1)
                + _mm256_extract_epi64(count_sum, 2)
                + _mm256_extract_epi64(count_sum, 3);
            int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
            count = count - f1; // remove extra bits (from empty space for align only)
            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}
// AVX float -> bitmap: bit i of dst is 1 iff src[i] > 0 (LSB-first within
// each byte). Processes 8 floats per iteration with a compare + movemask,
// writing one byte per step. dst must hold at least size/8 + 1 bytes.
// NOTE(review): assumes size is a multiple of 8 — the final iteration
// reads past src (and writes a byte built from stale lanes) otherwise;
// confirm callers pad size.
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
    size_t dst_size = size / 8 + 1;
    memset(dst, 0, dst_size);
    size_t i;
    //__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
    __m256 float_zero256 = _mm256_set1_ps(0.0);
    for (i = 0; i < size; i += 8)
    {
        //__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
        //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
        //uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
        ////mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0
        __m256 src256 = _mm256_loadu_ps((float *)(&src[i]));
        __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
        uint32_t mask = _mm256_movemask_ps(result256); // bit = (val > 0) ? 1 : 0
        dst[i / 8] = mask;   // low 8 mask bits = signs of 8 consecutive floats
    }
}
#else // AVX
// Reference single-precision GEMM: C[M,N] += ALPHA * A[M,K] * B[K,N],
// row-major with leading dimensions lda/ldb/ldc. The i-k-j loop order
// keeps the inner accesses to B and C sequential.
void gemm_nn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int row, col, inner;
    for (row = 0; row < M; ++row) {
        for (inner = 0; inner < K; ++inner) {
            const float a_scaled = ALPHA * A[row*lda + inner];   // hoisted invariant
            for (col = 0; col < N; ++col) {
                C[row*ldc + col] += a_scaled * B[inner*ldb + col];
            }
        }
    }
}
// Scalar XNOR-GEMM on 32-bit packed rows: for each packed word of A,
// XNOR it against the corresponding B row and accumulate
// (2*popcount - 32) * mean_arr[i] into every output column.
// ALPHA is accepted for signature compatibility but unused.
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {                  // output row (filter)
        const float scale = mean_arr[i];
        int word, col;
        for (word = 0; word < K; ++word) {     // packed 32-bit chunk
            const uint32_t a_bits = A[i*lda + word];
            for (col = 0; col < N; ++col) {    // output column (spatial position)
                const uint32_t matches = ~(a_bits ^ B[word*ldb + col]);
                const int32_t cnt = popcnt_32(matches);   // signed on purpose
                C[i*ldc + col] += (2 * cnt - 32) * scale;
            }
        }
    }
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Non-AVX build: no custom fast path — delegate straight to the
// reference im2col_cpu implementation.
void im2col_cpu_custom(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col)
{
    im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Scalar im2col that binarizes on the fly: bit col_index of data_col is
// set iff the sampled pixel is > 0. Output rows are bit_align bits apart
// (so each row starts word-aligned for the later bit transpose). Only the
// stride-1/pad-1 size-preserving case is implemented; other configs just
// print an error.
void im2col_cpu_custom_bin(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1)
    {
        int new_ldb = bit_align;   // output row stride in bits
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            // interior (in-bounds by construction).
            // NOTE(review): the "- 8" bound with step 1 is a leftover from
            // the vectorized build; this loop and the remainder loop below
            // do identical work and together cover the interior columns.
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 1) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit(data_col, col_index);
                }
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit(data_col, col_index);
                }
            }
            // left border column (w == 0), bounds-checked sampling
            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit(data_col, col_index);
                }
            }
            // right border column (w == width_col - 1)
            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit(data_col, col_index);
                }
            }
            // top border row (h == 0)
            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit(data_col, col_index);
                }
            }
            // bottom border row (h == height_col - 1)
            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit(data_col, col_index);
                }
            }
        }
    }
    else {
        printf("\n Error: is no non-optimized version \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}
// Apply activation `a` elementwise to x[0..n-1]. LINEAR is the identity
// and is skipped entirely; everything else uses scalar activate().
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int idx;
    if (a == LINEAR) return;   // identity: nothing to do
    for (idx = 0; idx < n; ++idx) {
        x[idx] = activate(x[idx], a);
    }
}
// Scalar maxpool forward pass: each output cell takes the max over a
// size x size window (stride-spaced, shifted by -pad/2), and the flat
// input index of the winning element is recorded in indexes[] for the
// backward pass. Out-of-bounds taps never win (-FLT_MAX sentinel).
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
    int pad, int stride, int batch)
{
    const int w_offset = -pad / 2;
    const int h_offset = -pad / 2;
    int b, k;
    for (b = 0; b < batch; ++b) {
        #pragma omp parallel for
        for (k = 0; k < c; ++k) {
            int oy, ox, fy, fx;
            for (oy = 0; oy < out_h; ++oy) {
                for (ox = 0; ox < out_w; ++ox) {
                    const int out_index = ox + out_w*(oy + out_h*(k + c*b));
                    float best = -FLT_MAX;
                    int best_i = -1;
                    for (fy = 0; fy < size; ++fy) {
                        for (fx = 0; fx < size; ++fx) {
                            const int cur_h = h_offset + oy*stride + fy;
                            const int cur_w = w_offset + ox*stride + fx;
                            const int index = cur_w + w*(cur_h + h*(k + b*c));
                            const int valid = (cur_h >= 0 && cur_h < h &&
                                cur_w >= 0 && cur_w < w);
                            const float val = valid ? src[index] : -FLT_MAX;
                            if (val > best) { best = val; best_i = index; }
                        }
                    }
                    dst[out_index] = best;
                    indexes[out_index] = best_i;
                }
            }
        }
    }
}
// Number of set bits in a 64-bit word, with per-platform fallbacks.
static inline int popcnt_64(uint64_t val64) {
#ifdef WIN32 // Windows
#ifdef _WIN64 // Windows 64-bit
    int tmp_count = __popcnt64(val64);
#else // Windows 32-bit: count each 32-bit half separately
    int tmp_count = __popcnt(val64);
    tmp_count += __popcnt(val64 >> 32);
#endif
#else // Linux
#ifdef __x86_64__ // Linux 64-bit
    int tmp_count = __builtin_popcountll(val64);
#else // Linux 32-bit: count each 32-bit half separately
    // BUGFIX: the high half must be shifted down first — the old code
    // counted the low 32 bits twice and ignored bits 32..63 entirely.
    int tmp_count = __builtin_popcount((uint32_t)val64);
    tmp_count += __builtin_popcount((uint32_t)(val64 >> 32));
#endif
#endif
    return tmp_count;
}
// Scalar XNOR-GEMM on bit-packed rows with B transposed, 64 bits per step:
// C[i,j] = (2*popcount(xnor(A_row_i, B_row_j)) - K) * mean_arr[i].
// lda/ldb are row strides in BITS; rows must be padded so the final
// 64-bit load stays in bounds (the padded tail bits are subtracted below).
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
        // BUGFIX: j and k (and an unused h) were declared at function scope,
        // making them SHARED across OpenMP threads — a data race. They are
        // now private to each parallel iteration.
        int j, k;
        float mean_val = mean_arr[i];
        for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
            int count = 0;
            for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
                uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
                uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
                uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
                int tmp_count = popcnt_64(c_bit64);
                // last chunk may extend past K: drop the padded tail bits
                if (K - k < 64) tmp_count = tmp_count - (64 - (K - k));
                count += tmp_count;
            }
            // matches - mismatches over K bits = 2*count - K
            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}
// Pack the sign pattern of `size` floats into a bitmap: bit i of dst is 1
// iff src[i] > 0 (LSB-first within each byte). dst must hold at least
// size/8 + 1 bytes and is zeroed first.
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
    size_t dst_size = size / 8 + 1;
    memset(dst, 0, dst_size);
    size_t i;
    // BUGFIX + simplification: the old code staged flags in a calloc'd byte
    // array and consumed it 8 bytes at a time, reading up to 7 bytes past
    // the end whenever size was not a multiple of 8 (and never checking the
    // allocation). A single direct pass needs no temporary buffer and
    // handles any size.
    for (i = 0; i < size; ++i) {
        if (src[i] > 0) dst[i / 8] |= (unsigned char)(1u << (i % 8));
    }
}
#endif // __x86_64
/*
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
register float A_PART = ALPHA*A[i*lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
*/
// Set every INCX-strided element of X (N elements total) to ALPHA.
void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    int idx;
    for (idx = 0; idx < N; ++idx) {
        X[idx * INCX] = ALPHA;
    }
}
// -------------- utils.c --------------
// utils.c
// Print s with the current errno message, then abort: assert(0) produces
// a debuggable trap in debug builds, exit(-1) terminates release builds
// (where NDEBUG disables the assert).
void error(const char *s)
{
    perror(s);
    assert(0);
    exit(-1);
}
// utils.c
// Report an allocation failure on stderr and terminate the process.
void malloc_error()
{
    fprintf(stderr, "Malloc error\n");
    exit(-1);
}
// utils.c
// Report a file-open failure and terminate.
// NOTE(review): exits with status 0 (success) — probably should be
// nonzero; left unchanged in case scripts depend on it.
void file_error(char *s)
{
    fprintf(stderr, "Couldn't open file: %s\n", s);
    exit(0);
}
// utils.c
// Read one full line from fp into a freshly malloc'd buffer (caller
// frees), growing the buffer geometrically until the line ends in '\n'
// or EOF. The trailing '\n' is stripped. Returns NULL at EOF or when the
// initial read fails.
// NOTE(review): the initial malloc result is used unchecked.
char *fgetl(FILE *fp)
{
    if (feof(fp)) return 0;
    size_t size = 512;
    char *line = malloc(size * sizeof(char));
    if (!fgets(line, size, fp)) {
        free(line);
        return 0;
    }
    size_t curr = strlen(line);
    // keep appending until the line is complete (ends in '\n') or EOF
    while ((line[curr - 1] != '\n') && !feof(fp)) {
        if (curr == size - 1) {
            // buffer full: double it
            size *= 2;
            line = realloc(line, size * sizeof(char));
            if (!line) {
                printf("%ld\n", (int long)size);
                malloc_error();
            }
        }
        size_t readsize = size - curr;
        if (readsize > INT_MAX) readsize = INT_MAX - 1;   // fgets takes an int
        fgets(&line[curr], readsize, fp);
        curr = strlen(line);
    }
    if (line[curr - 1] == '\n') line[curr - 1] = '\0';    // strip newline
    return line;
}
// utils.c
// Read a class-remapping file (one integer per line) into a heap array
// (caller frees). Exits via file_error() if the file can't be opened.
int *read_map(char *filename)
{
    int n = 0;
    int *map = 0;
    char *str;
    FILE *file = fopen(filename, "r");
    if (!file) file_error(filename);
    while ((str = fgetl(file))) {
        ++n;
        map = realloc(map, n * sizeof(int));
        map[n - 1] = atoi(str);
        free(str);     // BUGFIX: fgetl() allocates every line; these leaked
    }
    fclose(file);      // BUGFIX: the FILE handle was never closed
    return map;
}
// utils.c
// Remove argv[index], shifting the remaining arguments left and
// null-terminating the shortened list.
void del_arg(int argc, char **argv, int index)
{
    int i;
    for (i = index; i + 1 < argc; ++i) {
        argv[i] = argv[i + 1];
    }
    argv[i] = 0;
}
// utils.c
// Return 1 (and remove the flag from argv) if arg is present, else 0.
int find_arg(int argc, char* argv[], char *arg)
{
    int idx;
    for (idx = 0; idx < argc; ++idx) {
        if (!argv[idx]) continue;                 // slot cleared by an earlier del_arg
        if (strcmp(argv[idx], arg) != 0) continue;
        del_arg(argc, argv, idx);
        return 1;
    }
    return 0;
}
// utils.c
// Return the integer following flag `arg` (removing both flag and value
// from argv), or def when the flag is absent.
int find_int_arg(int argc, char **argv, char *arg, int def)
{
    int idx;
    for (idx = 0; idx < argc - 1; ++idx) {
        if (!argv[idx]) continue;
        if (strcmp(argv[idx], arg) == 0) {
            def = atoi(argv[idx + 1]);
            del_arg(argc, argv, idx);   // remove the flag...
            del_arg(argc, argv, idx);   // ...and its value (now at idx)
            break;
        }
    }
    return def;
}
// utils.c
// Return the float following flag `arg` (removing both flag and value
// from argv), or def when the flag is absent.
float find_float_arg(int argc, char **argv, char *arg, float def)
{
    int idx;
    for (idx = 0; idx < argc - 1; ++idx) {
        if (!argv[idx]) continue;
        if (strcmp(argv[idx], arg) == 0) {
            def = atof(argv[idx + 1]);
            del_arg(argc, argv, idx);   // remove the flag...
            del_arg(argc, argv, idx);   // ...and its value (now at idx)
            break;
        }
    }
    return def;
}
// utils.c
// Return the string following flag `arg` (removing both flag and value
// from argv), or def when the flag is absent. The returned pointer
// aliases the original argv storage.
char *find_char_arg(int argc, char **argv, char *arg, char *def)
{
    int idx;
    for (idx = 0; idx < argc - 1; ++idx) {
        if (!argv[idx]) continue;
        if (strcmp(argv[idx], arg) == 0) {
            def = argv[idx + 1];
            del_arg(argc, argv, idx);   // remove the flag...
            del_arg(argc, argv, idx);   // ...and its value (now at idx)
            break;
        }
    }
    return def;
}
// utils.c
// Remove every space, tab, CR and LF from s, in place.
void strip(char *s)
{
    const size_t len = strlen(s);
    size_t read_i;
    size_t write_i = 0;
    for (read_i = 0; read_i < len; ++read_i) {
        const char ch = s[read_i];
        if (ch != ' ' && ch != '\t' && ch != '\n' && ch != '\r') {
            s[write_i++] = ch;   // compact kept characters to the front
        }
    }
    s[write_i] = '\0';
}
// utils.c
// Append val to the doubly-linked list l in O(1), updating back and size.
// NOTE(review): malloc result is used unchecked, matching the file's style.
void list_insert(list *l, void *val)
{
    node *item = malloc(sizeof(node));
    item->val = val;
    item->next = 0;
    if (l->back) {
        l->back->next = item;
        item->prev = l->back;
    }
    else {
        // empty list: new node becomes the front
        l->front = item;
        item->prev = 0;
    }
    l->back = item;
    ++l->size;
}
// utils.c
/* Uniform random float in [min, max]; bounds may be given in either order. */
float rand_uniform(float min, float max)
{
    if (max < min) {
        float t = max;
        max = min;
        min = t;
    }
    float frac = (float)rand() / RAND_MAX;
    return frac * (max - min) + min;
}
// utils.c
/* Random scale in [1, s], inverted (1/scale) with probability 1/2. */
float rand_scale(float s)
{
    float k = rand_uniform(1, s);
    return (rand() % 2) ? k : 1. / k;
}
// utils.c
/* Random integer in [min, max]; bounds may be given in either order. */
int rand_int(int min, int max)
{
    if (max < min) {
        int t = min;
        min = max;
        max = t;
    }
    return min + rand() % (max - min + 1);
}
// utils.c
/* Clamp a into [min, max]. */
int constrain_int(int a, int min, int max)
{
    return a < min ? min : (a > max ? max : a);
}
// utils.c
float dist_array(float *a, float *b, int n, int sub)
{
int i;
float sum = 0;
for (i = 0; i < n; i += sub) sum += powf(a[i] - b[i], 2);
return sqrt(sum);
}
// utils.c
float mag_array(float *a, int n)
{
int i;
float sum = 0;
for (i = 0; i < n; ++i) {
sum += a[i] * a[i];
}
return sqrt(sum);
}
// utils.c
/* Index of the largest entry in a[0..n); ties keep the earliest index.
   Returns -1 for n <= 0. */
int max_index(float *a, int n)
{
    if (n <= 0) return -1;
    int best = 0;
    int i;
    for (i = 1; i < n; ++i) {
        if (a[i] > a[best]) best = i;
    }
    return best;
}
// utils.c
// From http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
/* Standard-normal deviate via the Box-Muller transform; the second
   value of each pair is cached and returned on the next call. */
float rand_normal()
{
    static int have_spare = 0;
    static double mag2, angle;
    if (have_spare) {
        have_spare = 0;
        return sqrt(mag2) * sin(angle);
    }
    mag2 = rand() / ((double)RAND_MAX);
    if (mag2 < 1e-100) mag2 = 1e-100;   /* guard log(0) */
    mag2 = -2 * log(mag2);
    angle = (rand() / ((double)RAND_MAX)) * TWO_PI;
    have_spare = 1;
    return sqrt(mag2) * cos(angle);
}
// utils.c
/* Free n pointers stored in ptrs, then the pointer array itself. */
void free_ptrs(void **ptrs, int n)
{
    while (n-- > 0) free(ptrs[n]);
    free(ptrs);
}
// -------------- tree.c --------------
// tree.c
/* Multiply each node's score by its parent's score (parents are
   assumed to precede children in index order), then optionally zero
   every non-leaf entry. */
void hierarchy_predictions(float *predictions, int n, tree *hier, int only_leaves)
{
    int i;
    for (i = 0; i < n; ++i) {
        int p = hier->parent[i];
        if (p >= 0) predictions[i] *= predictions[p];
    }
    if (!only_leaves) return;
    for (i = 0; i < n; ++i) {
        if (!hier->leaf[i]) predictions[i] = 0;
    }
}
// tree.c
/* Parse a hierarchy file ("name parent_id" per line) into a tree.
   Groups are runs of consecutive nodes sharing the same parent.
   FIXES: fopen() result was never checked (a missing file crashed
   inside fgetl); sscanf used an unbounded %s into a 256-byte buffer;
   each fgetl() line was leaked. */
tree *read_tree(char *filename)
{
    tree t = { 0 };
    FILE *fp = fopen(filename, "r");
    if (!fp) file_error(filename);
    char *line;
    int last_parent = -1;
    int group_size = 0;
    int groups = 0;
    int n = 0;
    while ((line = fgetl(fp)) != 0) {
        char *id = calloc(256, sizeof(char));
        int parent = -1;
        sscanf(line, "%255s %d", id, &parent);   /* width limit: no overflow */
        t.parent = realloc(t.parent, (n + 1) * sizeof(int));
        t.parent[n] = parent;
        t.name = realloc(t.name, (n + 1) * sizeof(char *));
        t.name[n] = id;
        if (parent != last_parent) {
            /* close the previous group and start a new one */
            ++groups;
            t.group_offset = realloc(t.group_offset, groups * sizeof(int));
            t.group_offset[groups - 1] = n - group_size;
            t.group_size = realloc(t.group_size, groups * sizeof(int));
            t.group_size[groups - 1] = group_size;
            group_size = 0;
            last_parent = parent;
        }
        t.group = realloc(t.group, (n + 1) * sizeof(int));
        t.group[n] = groups;
        ++n;
        ++group_size;
        free(line);                              /* was leaked before */
    }
    /* flush the final group */
    ++groups;
    t.group_offset = realloc(t.group_offset, groups * sizeof(int));
    t.group_offset[groups - 1] = n - group_size;
    t.group_size = realloc(t.group_size, groups * sizeof(int));
    t.group_size[groups - 1] = group_size;
    t.n = n;
    t.groups = groups;
    t.leaf = calloc(n, sizeof(int));
    int i;
    for (i = 0; i < n; ++i) t.leaf[i] = 1;
    for (i = 0; i < n; ++i) if (t.parent[i] >= 0) t.leaf[t.parent[i]] = 0;
    fclose(fp);
    tree *tree_ptr = calloc(1, sizeof(tree));
    *tree_ptr = t;
    return tree_ptr;
}
// -------------- list.c --------------
// list.c
/* Allocate an empty doubly linked list. */
list *make_list()
{
    list *l = malloc(sizeof(list));
    l->front = 0;
    l->back = 0;
    l->size = 0;
    return l;
}
// list.c
/* Read every line of the file into a list of heap-allocated strings. */
list *get_paths(char *filename)
{
    FILE *fp = fopen(filename, "r");
    if (!fp) file_error(filename);
    list *lines = make_list();
    char *path;
    while ((path = fgetl(fp)) != 0) list_insert(lines, path);
    fclose(fp);
    return lines;
}
// list.c
/* Copy the list's value pointers into a newly allocated flat array. */
void **list_to_array(list *l)
{
    void **arr = calloc(l->size, sizeof(void*));
    int idx = 0;
    node *cur;
    for (cur = l->front; cur; cur = cur->next) arr[idx++] = cur->val;
    return arr;
}
// list.c
/* Free a node chain (the nodes only — values are not freed). */
void free_node(node *n)
{
    while (n) {
        node *dead = n;
        n = n->next;
        free(dead);
    }
}
// list.c
/* Release the list header and its nodes (values are not freed). */
void free_list(list *l)
{
    node *head = l->front;
    free(l);
    free_node(head);
}
// list.c
/* Load one label string per file line; caller owns the array and strings. */
char **get_labels(char *filename)
{
    list *lines = get_paths(filename);
    char **names = (char **)list_to_array(lines);
    free_list(lines);
    return names;
}
// -------------- network.c --------------
// network.c
/* Output buffer of the last non-COST layer (falls back to layer 0). */
float *get_network_output(network net)
{
    int i = net.n - 1;
    while (i > 0 && net.layers[i].type == COST) --i;
    return net.layers[i].output;
}
// network.c
/* Output count of the last non-COST layer (falls back to layer 0). */
int get_network_output_size(network net)
{
    int i = net.n - 1;
    while (i > 0 && net.layers[i].type == COST) --i;
    return net.layers[i].outputs;
}
// network.c
/* Allocate a zeroed network with room for n layers and a seen counter. */
network make_network(int n)
{
    network net = { 0 };
    net.n = n;
    net.seen = calloc(1, sizeof(uint64_t));
    net.layers = calloc(net.n, sizeof(layer));
    return net;
}
/* Release every layer and the network-owned buffers. */
void free_network(network net)
{
    int i;
    for (i = 0; i < net.n; ++i) free_layer(net.layers[i]);
    free(net.layers);
    free(net.scales);
    free(net.steps);
    free(net.seen);
    free(net.workspace);
}
// network.c
// network.c
/* Set the batch size on the network and on every layer.
   BUG FIX: the original copied each layer by value ("layer l = ...;
   l.batch = b;"), so the per-layer batch field was never actually
   updated — write through the array instead. */
void set_batch_network(network *net, int b)
{
    net->batch = b;
    int i;
    for (i = 0; i < net->n; ++i) {
        net->layers[i].batch = b;
    }
}
// -------------- layer.c --------------
/* Release every heap buffer a layer may own.  free(NULL) is a defined
   no-op, so unconditional free() replaces the per-field NULL checks.
   DROPOUT layers own only their rand buffer. */
void free_layer(layer l)
{
    if (l.type == DROPOUT) {
        free(l.rand);
        return;
    }
    free(l.cweights);
    free(l.indexes);
    free(l.input_layers);
    free(l.input_sizes);
    free(l.map);
    free(l.rand);
    free(l.cost);
    free(l.state);
    free(l.prev_state);
    free(l.forgot_state);
    free(l.forgot_delta);
    free(l.state_delta);
    free(l.concat);
    free(l.concat_delta);
    free(l.biases);
    free(l.biases_quant);
    free(l.scales);
    free(l.weights);
    free(l.weights_int8);
    free(l.align_bit_weights);
    free(l.mean_arr);
    free(l.output);
    free(l.output_int8);
    free(l.squared);
    free(l.norms);
    free(l.spatial_mean);
    free(l.mean);
    free(l.variance);
    free(l.rolling_mean);
    free(l.rolling_variance);
    free(l.x);
    free(l.x_norm);
    free(l.m);
    free(l.v);
    free(l.z_cpu);
    free(l.r_cpu);
    free(l.h_cpu);
    free(l.mask);
}
// -------------- softmax_layer.c --------------
// softmax_layer.c
/* Build a SOFTMAX layer; inputs must divide evenly into groups. */
softmax_layer make_softmax_layer(int batch, int inputs, int groups)
{
    assert(inputs%groups == 0);
    fprintf(stderr, "softmax                                        %4d\n", inputs);
    softmax_layer l = { 0 };
    l.type = SOFTMAX;
    l.batch = batch;
    l.inputs = inputs;
    l.outputs = inputs;
    l.groups = groups;
    l.output = calloc(inputs*batch, sizeof(float));
    return l;
}
// -------------- upsample_layer.c --------------
// upsample_layer.c
/* Build an UPSAMPLE layer; a negative stride means downsample (reverse). */
layer make_upsample_layer(int batch, int w, int h, int c, int stride)
{
    layer l = { 0 };
    l.type = UPSAMPLE;
    l.batch = batch;
    l.w = w;
    l.h = h;
    l.c = c;
    l.out_c = c;
    if (stride < 0) {
        stride = -stride;
        l.reverse = 1;
        l.out_w = w / stride;
        l.out_h = h / stride;
    } else {
        l.out_w = w * stride;
        l.out_h = h * stride;
    }
    l.stride = stride;
    l.outputs = l.out_w * l.out_h * l.out_c;
    l.inputs = l.w * l.h * l.c;
    l.output = calloc(l.outputs * batch, sizeof(float));
    if (l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}
// -------------- shortcut_layer.c --------------
// shortcut.c
/* Build a SHORTCUT (residual add) layer: adds layer `index`'s
   (w2,h2,c2) output into this layer's (w,h,c) output. */
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
    fprintf(stderr, "Shortcut Layer: %d\n", index);
    layer l = { 0 };
    l.type = SHORTCUT;
    l.batch = batch;
    l.index = index;
    l.w = w2;
    l.h = h2;
    l.c = c2;
    l.out_w = w;
    l.out_h = h;
    l.out_c = c;
    l.outputs = w * h * c;
    l.inputs = l.outputs;
    l.output = calloc(l.outputs * batch, sizeof(float));
    return l;
}
// -------------- reorg_layer.c --------------
// reorg_layer.c
/* Build a REORG layer: space-to-depth by `stride` (or depth-to-space
   when reverse is set). */
layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse)
{
    layer l = { 0 };
    l.type = REORG;
    l.batch = batch;
    l.stride = stride;
    l.reverse = reverse;
    l.h = h;
    l.w = w;
    l.c = c;
    if (reverse) {
        l.out_w = w * stride;
        l.out_h = h * stride;
        l.out_c = c / (stride * stride);
    } else {
        l.out_w = w / stride;
        l.out_h = h / stride;
        l.out_c = c * (stride * stride);
    }
    fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h * w * c;
    int vol = l.out_h * l.out_w * l.out_c * batch;
    l.output = calloc(vol, sizeof(float));
    l.output_int8 = calloc(vol, sizeof(int8_t));
    return l;
}
// -------------- route_layer.c --------------
// route_layer.c
/* Build a ROUTE layer concatenating n earlier layers' outputs.
   Takes ownership of the input_layers / input_sizes arrays. */
route_layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes)
{
    fprintf(stderr, "route ");
    route_layer l = { 0 };
    l.type = ROUTE;
    l.batch = batch;
    l.n = n;
    l.input_layers = input_layers;
    l.input_sizes = input_sizes;
    int total = 0;
    int i;
    for (i = 0; i < n; ++i) {
        fprintf(stderr, " %d", input_layers[i]);
        total += input_sizes[i];
    }
    fprintf(stderr, "\n");
    l.outputs = total;
    l.inputs = total;
    l.output = calloc(total * batch, sizeof(float));
    l.output_int8 = calloc(total * batch, sizeof(int8_t));
    return l;
}
// -------------- yolo_layer.c --------------
/* Build a YOLO detection layer: n anchor slots (of `total` anchors)
   over an h×w grid.  A NULL mask defaults to the identity 0..n-1. */
layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes, int max_boxes)
{
    layer l = { 0 };
    int i;
    l.type = YOLO;
    l.n = n;
    l.total = total;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = n * (classes + 4 + 1);   /* per anchor: box(4) + objectness + classes */
    l.out_w = l.w;
    l.out_h = l.h;
    l.out_c = l.c;
    l.classes = classes;
    l.cost = calloc(1, sizeof(float));
    l.biases = calloc(total * 2, sizeof(float));
    if (mask) {
        l.mask = mask;
    } else {
        l.mask = calloc(n, sizeof(int));
        for (i = 0; i < n; ++i) l.mask[i] = i;
    }
    l.outputs = h * w * n * (classes + 4 + 1);
    l.inputs = l.outputs;
    l.max_boxes = max_boxes;
    l.truths = l.max_boxes * (4 + 1);
    l.output = calloc(batch * l.outputs, sizeof(float));
    for (i = 0; i < total * 2; ++i) l.biases[i] = .5;
    fprintf(stderr, "yolo\n");
    srand(0);
    return l;
}
// -------------- region_layer.c --------------
// region_layer.c
/* Build a REGION detection layer (YOLOv2-style) with n anchors. */
region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
    region_layer l = { 0 };
    int i;
    l.type = REGION;
    l.n = n;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.classes = classes;
    l.coords = coords;
    l.cost = calloc(1, sizeof(float));
    l.biases = calloc(n * 2, sizeof(float));
    l.outputs = h * w * n * (classes + coords + 1);
    l.inputs = l.outputs;
    l.truths = 30 * (5);
    l.output = calloc(batch * l.outputs, sizeof(float));
    for (i = 0; i < n * 2; ++i) l.biases[i] = .5;
    fprintf(stderr, "detection\n");
    srand(0);
    return l;
}
// -------------- maxpool_layer.c --------------
// maxpool_layer.c
/* Build a MAXPOOL layer (size×size window, given stride and padding). */
maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
{
    maxpool_layer l = { 0 };
    l.type = MAXPOOL;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = c;
    l.size = size;
    l.stride = stride;
    l.pad = padding;
    l.out_w = (w + padding - size) / stride + 1;
    l.out_h = (h + padding - size) / stride + 1;
    l.out_c = c;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h * w * c;
    int vol = l.out_h * l.out_w * l.out_c * batch;
    l.indexes = calloc(vol, sizeof(int));   /* argmax positions for backprop */
    l.output = calloc(vol, sizeof(float));
    l.output_int8 = calloc(vol, sizeof(int8_t));
    fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}
// -------------- convolutional_layer.c --------------
// convolutional_layer.c
/* Bytes of scratch space the conv layer needs (im2col buffer). */
size_t get_workspace_size(layer l) {
    size_t cells = (size_t)l.out_h * l.out_w * l.size * l.size * l.c;
    return cells * sizeof(float);
}
/* Output rows of a conv layer: (h + 2*pad - size)/stride + 1. */
int convolutional_out_height(convolutional_layer l)
{
    int padded = l.h + 2 * l.pad;
    return (padded - l.size) / l.stride + 1;
}
/* Output columns of a conv layer: (w + 2*pad - size)/stride + 1. */
int convolutional_out_width(convolutional_layer l)
{
    int padded = l.w + 2 * l.pad;
    return (padded - l.size) / l.stride + 1;
}
// convolutional_layer.c
/* Build a convolutional layer: n filters of size×size over h×w×c input.
   Weights get a scaled random init; batch-norm and adam buffers are
   allocated only when requested. */
convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int adam, int quantized)
{
    convolutional_layer l = { 0 };
    int idx;
    const int nweights = c * n * size * size;
    l.type = CONVOLUTIONAL;
    l.quantized = quantized;
    l.h = h;
    l.w = w;
    l.c = c;
    l.n = n;
    l.batch = batch;
    l.stride = stride;
    l.size = size;
    l.pad = padding;
    l.batch_normalize = batch_normalize;
    l.weights = calloc(nweights, sizeof(float));
    l.weights_int8 = calloc(nweights, sizeof(int8_t));
    l.biases = calloc(n, sizeof(float));
    l.biases_quant = calloc(n, sizeof(float));
    /* init scale ~ sqrt(2 / fan_in) */
    float scale = sqrt(2. / (size * size * c));
    for (idx = 0; idx < nweights; ++idx) l.weights[idx] = scale * rand_uniform(-1, 1);
    l.out_h = convolutional_out_height(l);
    l.out_w = convolutional_out_width(l);
    l.out_c = n;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = l.w * l.h * l.c;
    l.output = calloc(l.batch * l.outputs, sizeof(float));
    l.output_int8 = calloc(l.batch * l.outputs, sizeof(int8_t));
    if (batch_normalize) {
        l.scales = calloc(n, sizeof(float));
        for (idx = 0; idx < n; ++idx) l.scales[idx] = 1;
        l.mean = calloc(n, sizeof(float));
        l.variance = calloc(n, sizeof(float));
        l.rolling_mean = calloc(n, sizeof(float));
        l.rolling_variance = calloc(n, sizeof(float));
        l.x = calloc(l.batch * l.outputs, sizeof(float));
        l.x_norm = calloc(l.batch * l.outputs, sizeof(float));
    }
    if (adam) {
        l.adam = 1;
        l.m = calloc(nweights, sizeof(float));
        l.v = calloc(nweights, sizeof(float));
    }
    l.workspace_size = get_workspace_size(l);
    l.activation = activation;
    l.bflops = (2.0 * l.n * l.size * l.size * l.c * l.out_h * l.out_w) / 1000000000.;
    fprintf(stderr, "conv ");
    fprintf(stderr, "%5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
    return l;
}
// -------------- image.c --------------
// image.c
/* Swap channel planes 0 and 2 in place (RGB <-> BGR). */
void rgbgr_image(image im)
{
    int plane = im.w * im.h;
    int i;
    for (i = 0; i < plane; ++i) {
        float t = im.data[i];
        im.data[i] = im.data[i + 2 * plane];
        im.data[i + 2 * plane] = t;
    }
}
// image.c
/* Image header with the given dimensions and no pixel buffer. */
image make_empty_image(int w, int h, int c)
{
    image out;
    out.w = w;
    out.h = h;
    out.c = c;
    out.data = 0;
    return out;
}
// image.c
/* Release the pixel buffer, if any. */
void free_image(image m)
{
    if (!m.data) return;
    free(m.data);
}
// image.c
/* Draw a 1-pixel rectangle outline in color (r,g,b); corners are
   clamped to the image bounds first.  Planar CHW layout. */
void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b)
{
    float rgb[3];
    rgb[0] = r;
    rgb[1] = g;
    rgb[2] = b;
    int plane = a.w * a.h;
    int k, i;
    if (x1 < 0) x1 = 0;
    if (x1 >= a.w) x1 = a.w - 1;
    if (x2 < 0) x2 = 0;
    if (x2 >= a.w) x2 = a.w - 1;
    if (y1 < 0) y1 = 0;
    if (y1 >= a.h) y1 = a.h - 1;
    if (y2 < 0) y2 = 0;
    if (y2 >= a.h) y2 = a.h - 1;
    for (k = 0; k < 3; ++k) {
        /* top and bottom edges */
        for (i = x1; i <= x2; ++i) {
            a.data[i + y1 * a.w + k * plane] = rgb[k];
            a.data[i + y2 * a.w + k * plane] = rgb[k];
        }
        /* left and right edges */
        for (i = y1; i <= y2; ++i) {
            a.data[x1 + i * a.w + k * plane] = rgb[k];
            a.data[x2 + i * a.w + k * plane] = rgb[k];
        }
    }
}
// image.c
/* Draw a rectangle outline w pixels thick by nesting 1-px outlines. */
void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
{
    int off;
    for (off = 0; off < w; ++off) {
        draw_box(a, x1 + off, y1 + off, x2 - off, y2 - off, r, g, b);
    }
}
// image.c
/* Allocate a zero-filled w×h×c image. */
image make_image(int w, int h, int c)
{
    image out = make_empty_image(w, h, c);
    out.data = calloc(h*w*c, sizeof(float));
    return out;
}
// image.c
/* Read pixel (x,y) of channel c; bounds are asserted, not range-checked. */
float get_pixel(image m, int x, int y, int c)
{
    assert(x < m.w && y < m.h && c < m.c);
    int idx = (c * m.h + y) * m.w + x;
    return m.data[idx];
}
// image.c
/* Write pixel (x,y) of channel c; silently ignores out-of-range coords. */
void set_pixel(image m, int x, int y, int c, float val)
{
    if (x < 0 || y < 0 || c < 0) return;
    if (x >= m.w || y >= m.h || c >= m.c) return;
    assert(x < m.w && y < m.h && c < m.c);
    m.data[(c * m.h + y) * m.w + x] = val;
}
// image.c
/* Accumulate val into pixel (x,y) of channel c; bounds are asserted. */
void add_pixel(image m, int x, int y, int c, float val)
{
    assert(x < m.w && y < m.h && c < m.c);
    m.data[(c * m.h + y) * m.w + x] += val;
}
// image.c
/* Bilinear resize of im to w×h in two passes: horizontally into the
   intermediate `part` (w × im.h), then vertically into `resized`.
   The scales map output pixels onto source coordinates so corner
   pixels align exactly.  NOTE(review): w==1 or h==1 divides by zero
   in the scale computation — presumably callers never request that;
   confirm before relying on it. */
image resize_image(image im, int w, int h)
{
    image resized = make_image(w, h, im.c);
    image part = make_image(w, im.h, im.c);
    int r, c, k;
    float w_scale = (float)(im.w - 1) / (w - 1);
    float h_scale = (float)(im.h - 1) / (h - 1);
    /* Pass 1: horizontal interpolation into `part`. */
    for (k = 0; k < im.c; ++k) {
        for (r = 0; r < im.h; ++r) {
            for (c = 0; c < w; ++c) {
                float val = 0;
                /* last column (or 1-px-wide source) copies the edge pixel */
                if (c == w - 1 || im.w == 1) {
                    val = get_pixel(im, im.w - 1, r, k);
                }
                else {
                    float sx = c*w_scale;
                    int ix = (int)sx;
                    float dx = sx - ix;   /* fractional offset between ix and ix+1 */
                    val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix + 1, r, k);
                }
                set_pixel(part, c, r, k, val);
            }
        }
    }
    /* Pass 2: vertical interpolation; the dy-weighted lower row is
       added in a second sweep via add_pixel. */
    for (k = 0; k < im.c; ++k) {
        for (r = 0; r < h; ++r) {
            float sy = r*h_scale;
            int iy = (int)sy;
            float dy = sy - iy;
            for (c = 0; c < w; ++c) {
                float val = (1 - dy) * get_pixel(part, c, iy, k);
                set_pixel(resized, c, r, k, val);
            }
            /* last row (or 1-px-tall source): no second row to blend */
            if (r == h - 1 || im.h == 1) continue;
            for (c = 0; c < w; ++c) {
                float val = dy * get_pixel(part, c, iy + 1, k);
                add_pixel(resized, c, r, k, val);
            }
        }
    }
    free_image(part);
    return resized;
}
// image.c
/* Load an image, resizing to w×h when both are non-zero and differ
   from the file's native size. */
image load_image(char *filename, int w, int h, int c)
{
    image im = load_image_stb(filename, c);
    int need_resize = (h && w) && (h != im.h || w != im.w);
    if (need_resize) {
        image scaled = resize_image(im, w, h);
        free_image(im);
        im = scaled;
    }
    return im;
}
// image.c
// image.c
/* Load an image via stb_image, converting interleaved HWC bytes to
   planar CHW floats in [0,1].  channels==0 keeps the file's own count.
   FIX: failure now exits with a failure status — the old exit(0)
   reported success to the shell on a load error. */
image load_image_stb(char *filename, int channels)
{
    int w, h, c;
    unsigned char *data = stbi_load(filename, &w, &h, &c, channels);
    if (!data) {
        fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason());
        exit(EXIT_FAILURE);
    }
    if (channels) c = channels;
    int i, j, k;
    image im = make_image(w, h, c);
    for (k = 0; k < c; ++k) {
        for (j = 0; j < h; ++j) {
            for (i = 0; i < w; ++i) {
                int dst_index = i + w*j + w*h*k;      /* planar CHW */
                int src_index = k + c*i + c*w*j;      /* interleaved HWC */
                im.data[dst_index] = (float)data[src_index] / 255.;
            }
        }
    }
    free(data);
    return im;
}
// image.c
/* Deep copy: duplicate the header and clone the pixel buffer. */
image copy_image(image p)
{
    image dup = p;
    int count = p.h * p.w * p.c;
    dup.data = calloc(count, sizeof(float));
    memcpy(dup.data, p.data, count * sizeof(float));
    return dup;
}
// image.c
/* Clamp every pixel value into [0, 1] in place. */
void constrain_image(image im)
{
    int total = im.w * im.h * im.c;
    int i;
    for (i = 0; i < total; ++i) {
        float v = im.data[i];
        if (v < 0) v = 0;
        if (v > 1) v = 1;
        im.data[i] = v;
    }
}
// image.c
// image.c
/* Write im to "<name>.png", interleaving planar CHW floats (assumed
   in [0,1] — see constrain_image) into HWC bytes.
   FIX: snprintf replaces sprintf so a long `name` cannot overflow
   the 256-byte filename buffer. */
void save_image_png(image im, const char *name)
{
    char buff[256];
    snprintf(buff, sizeof(buff), "%s.png", name);
    unsigned char *data = calloc(im.w*im.h*im.c, sizeof(char));
    int i, k;
    for (k = 0; k < im.c; ++k) {
        for (i = 0; i < im.w*im.h; ++i) {
            data[i*im.c + k] = (unsigned char)(255 * im.data[i + k*im.w*im.h]);
        }
    }
    int success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c);
    free(data);
    if (!success) fprintf(stderr, "Failed to write image %s\n", buff);
}
// image.c
/* Fallback "viewer": without OpenCV the image is saved as <name>.png. */
void show_image(image p, const char *name)
{
    const char *target = name;
    fprintf(stderr, "Not compiled with OpenCV, saving to %s.png instead\n", target);
    save_image_png(p, target);
}
// image.c
float get_color(int c, int x, int max)
{
static float colors[6][3] = { { 1,0,1 },{ 0,0,1 },{ 0,1,1 },{ 0,1,0 },{ 1,1,0 },{ 1,0,0 } };
float ratio = ((float)x / max) * 5;
int i = floor(ratio);
int j = ceil(ratio);
ratio -= i;
float r = (1 - ratio) * colors[i][c] + ratio*colors[j][c];
//printf("%f\n", r);
return r;
}
// -------------- option_list.c --------------------
// option_list.c
/* One parsed "key=value" config entry.  `used` records whether the
   option was ever looked up, so option_unused() can warn about
   options the parser silently ignored. */
typedef struct {
    char *key;
    char *val;
    int used;
} kvp;
// option_list.c
/* Append a key/value pair to the options list (pointers are stored,
   not copied). */
void option_insert(list *l, char *key, char *val)
{
    kvp *entry = malloc(sizeof(kvp));
    entry->used = 0;
    entry->key = key;
    entry->val = val;
    list_insert(l, entry);
}
// option_list.c
// option_list.c
/* Split "key=value" in place at the first '=' and insert it.
   Returns 0 (line rejected, caller frees it) when there is no '=' at
   all, or when '=' is the final character.  FIX: the old code only
   caught the trailing-'=' case; a line with no '=' was inserted with
   a NULL val, which option_unused() later printed with %s. */
int read_option(char *s, list *options)
{
    size_t i;
    size_t len = strlen(s);
    char *val = 0;
    for (i = 0; i < len; ++i) {
        if (s[i] == '=') {
            s[i] = '\0';
            val = s + i + 1;
            break;
        }
    }
    if (!val || i == len - 1) return 0;
    option_insert(options, s, val);
    return 1;
}
// option_list.c
/* Parse a .data file into a flat list of key/value options.  Blank
   lines and '#'/';' comments are skipped; unparseable lines warn. */
list *read_data_cfg(char *filename)
{
    FILE *file = fopen(filename, "r");
    if (file == 0) file_error(filename);
    list *options = make_list();
    char *line;
    int line_no = 0;
    while ((line = fgetl(file)) != 0) {
        ++line_no;
        strip(line);
        char first = line[0];
        if (first == '\0' || first == '#' || first == ';') {
            free(line);
        }
        else if (!read_option(line, options)) {
            fprintf(stderr, "Config file error line %d, could parse: %s\n", line_no, line);
            free(line);
        }
    }
    fclose(file);
    return options;
}
// option_list.c
/* Warn about every option that was never looked up via option_find. */
void option_unused(list *l)
{
    node *cur;
    for (cur = l->front; cur; cur = cur->next) {
        kvp *entry = (kvp *)cur->val;
        if (entry->used) continue;
        fprintf(stderr, "Unused field: '%s = %s'\n", entry->key, entry->val);
    }
}
// option_list.c
/* Return the value for key (marking it used), or NULL if absent. */
char *option_find(list *l, char *key)
{
    node *cur;
    for (cur = l->front; cur; cur = cur->next) {
        kvp *entry = (kvp *)cur->val;
        if (strcmp(entry->key, key) != 0) continue;
        entry->used = 1;
        return entry->val;
    }
    return 0;
}
// option_list.c
/* String option with default; logs when the default is used. */
char *option_find_str(list *l, char *key, char *def)
{
    char *found = option_find(l, key);
    if (found) return found;
    if (def) fprintf(stderr, "%s: Using default '%s'\n", key, def);
    return def;
}
// option_list.c
/* Integer option with default; logs when the default is used. */
int option_find_int(list *l, char *key, int def)
{
    char *txt = option_find(l, key);
    if (!txt) {
        fprintf(stderr, "%s: Using default '%d'\n", key, def);
        return def;
    }
    return atoi(txt);
}
// option_list.c
/* Integer option with default; no log on fallback. */
int option_find_int_quiet(list *l, char *key, int def)
{
    char *txt = option_find(l, key);
    return txt ? atoi(txt) : def;
}
// option_list.c
/* Float option with default; no log on fallback. */
float option_find_float_quiet(list *l, char *key, float def)
{
    char *txt = option_find(l, key);
    return txt ? atof(txt) : def;
}
// option_list.c
/* Float option with default; logs when the default is used. */
float option_find_float(list *l, char *key, float def)
{
    char *txt = option_find(l, key);
    if (!txt) {
        fprintf(stderr, "%s: Using default '%lf'\n", key, def);
        return def;
    }
    return atof(txt);
}
// -------------- parser.c --------------------
// parser.c
// parser.c
/* Context threaded through the parse_* functions: the spatial shape
   of the previous layer's output plus global build settings. */
typedef struct size_params {
    int quantized;    /* non-zero: build INT8-quantized layers where allowed */
    int batch;
    int inputs;       /* flat output count of the previous layer */
    int h;
    int w;
    int c;
    int index;        /* index of the layer currently being built */
    int time_steps;
    network net;
} size_params;
// parser.c
// parser.c
/* One [bracketed] cfg section: its header string and its option list. */
typedef struct {
    char *type;
    list *options;
}section;
// parser.c
// parser.c
/* Parse a darknet .cfg into a list of sections ([type] + options).
   FIX: an option line appearing before any [section] header used to
   dereference the NULL `current`; it now exits with a clear error. */
list *read_cfg(char *filename)
{
    FILE *file = fopen(filename, "r");
    if (file == 0) file_error(filename);
    char *line;
    int nu = 0;
    list *sections = make_list();
    section *current = 0;
    while ((line = fgetl(file)) != 0) {
        ++nu;
        strip(line);
        switch (line[0]) {
        case '[':
            current = malloc(sizeof(section));
            list_insert(sections, current);
            current->options = make_list();
            current->type = line;   /* section keeps ownership of the line */
            break;
        case '\0':
        case '#':
        case ';':
            free(line);
            break;
        default:
            if (!current) {
                fprintf(stderr, "Config file error line %d, option before any section: %s\n", nu, line);
                exit(EXIT_FAILURE);
            }
            if (!read_option(line, current->options)) {
                fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
                free(line);
            }
            break;
        }
    }
    fclose(file);
    return sections;
}
// parser.c
// parser.c
/* Load conv-layer weights from fp in darknet order: biases,
   [if BN: scales, rolling_mean, rolling_variance], weights.
   FIX: fread results were ignored — a truncated weights file loaded
   silently; a short read now prints a warning. */
void load_convolutional_weights_cpu(layer l, FILE *fp)
{
    int num = l.n*l.c*l.size*l.size;
    size_t want = l.n;
    size_t got = fread(l.biases, sizeof(float), l.n, fp);
    if (l.batch_normalize && (!l.dontloadscales)) {
        got += fread(l.scales, sizeof(float), l.n, fp);
        got += fread(l.rolling_mean, sizeof(float), l.n, fp);
        got += fread(l.rolling_variance, sizeof(float), l.n, fp);
        want += 3 * (size_t)l.n;
    }
    got += fread(l.weights, sizeof(float), num, fp);
    want += (size_t)num;
    if (got != want) {
        fprintf(stderr, "Warning: short read loading convolutional weights (%zu/%zu floats)\n", got, want);
    }
}
// parser.c
// parser.c
/* Load weights for the first min(net->n, cutoff) layers from a darknet
   weights file (header: major/minor/revision then the seen counter).
   FIXES: the progress message is written to stderr, so flush stderr
   (the old code flushed stdout); the header freads are now checked so
   a too-short file aborts instead of reading garbage. */
void load_weights_upto_cpu(network *net, char *filename, int cutoff)
{
    fprintf(stderr, "Loading weights from %s...", filename);
    fflush(stderr);
    FILE *fp = fopen(filename, "rb");
    if (!fp) file_error(filename);
    int major;
    int minor;
    int revision;
    if (fread(&major, sizeof(int), 1, fp) != 1 ||
        fread(&minor, sizeof(int), 1, fp) != 1 ||
        fread(&revision, sizeof(int), 1, fp) != 1) {
        fprintf(stderr, "\nWeights file %s: truncated header\n", filename);
        fclose(fp);
        return;
    }
    if ((major * 10 + minor) >= 2) {
        fread(net->seen, sizeof(uint64_t), 1, fp);
    }
    else {
        /* old format stored seen as a 32-bit int */
        int iseen = 0;
        fread(&iseen, sizeof(int), 1, fp);
        *net->seen = iseen;
    }
    int i;
    for (i = 0; i < net->n && i < cutoff; ++i) {
        layer l = net->layers[i];
        if (l.dontload) continue;
        if (l.type == CONVOLUTIONAL) {
            load_convolutional_weights_cpu(l, fp);
        }
    }
    fprintf(stderr, "Done!\n");
    fclose(fp);
}
// parser.c
// parser.c
/* Build a conv layer from [convolutional] options.  Quantization is
   disabled for the first layer, linear activations, strided layers
   past index 1, and 1x1 kernels. */
convolutional_layer parse_convolutional(list *options, size_params params)
{
    int n = option_find_int(options, "filters", 1);
    int size = option_find_int(options, "size", 1);
    int stride = option_find_int(options, "stride", 1);
    int pad = option_find_int_quiet(options, "pad", 0);
    int padding = option_find_int_quiet(options, "padding", 0);
    if (pad) padding = size / 2;   /* pad=1 means "same"-style padding */
    char *activation_s = option_find_str(options, "activation", "logistic");
    ACTIVATION activation = get_activation(activation_s);
    int h = params.h;
    int w = params.w;
    int c = params.c;
    int batch = params.batch;
    if (!(h && w && c)) error("Layer before convolutional layer must output image.");
    int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
    int quantized = params.quantized;
    if (params.index == 0 || activation == LINEAR || (params.index > 1 && stride > 1) || size == 1)
        quantized = 0;
    convolutional_layer conv = make_convolutional_layer(batch, h, w, c, n, size, stride, padding, activation, batch_normalize, params.net.adam, quantized);
    conv.flipped = option_find_int_quiet(options, "flipped", 0);
    conv.dot = option_find_float_quiet(options, "dot", 0);
    if (params.net.adam) {
        conv.B1 = params.net.B1;
        conv.B2 = params.net.B2;
        conv.eps = params.net.eps;
    }
    return conv;
}
// parser.c
// parser.c
/* Build a [region] layer from its options.
   FIX: the anchors loop is now bounded by the biases capacity
   (num * 2 floats, see make_region_layer) the way parse_yolo bounds
   its loop with total*2 — previously listing more anchor values than
   2*num wrote past the end of l.biases. */
layer parse_region(list *options, size_params params)
{
    int coords = option_find_int(options, "coords", 4);
    int classes = option_find_int(options, "classes", 20);
    int num = option_find_int(options, "num", 1);
    layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
    assert(l.outputs == params.inputs);
    l.log = option_find_int_quiet(options, "log", 0);
    l.sqrt = option_find_int_quiet(options, "sqrt", 0);
    l.softmax = option_find_int(options, "softmax", 0);
    l.max_boxes = option_find_int_quiet(options, "max", 30);
    l.jitter = option_find_float(options, "jitter", .2);
    l.rescore = option_find_int_quiet(options, "rescore", 0);
    l.thresh = option_find_float(options, "thresh", .5);
    l.classfix = option_find_int_quiet(options, "classfix", 0);
    l.absolute = option_find_int_quiet(options, "absolute", 0);
    l.random = option_find_int_quiet(options, "random", 0);
    l.coord_scale = option_find_float(options, "coord_scale", 1);
    l.object_scale = option_find_float(options, "object_scale", 1);
    l.noobject_scale = option_find_float(options, "noobject_scale", 1);
    l.class_scale = option_find_float(options, "class_scale", 1);
    l.bias_match = option_find_int_quiet(options, "bias_match", 0);
    char *tree_file = option_find_str(options, "tree", 0);
    if (tree_file) l.softmax_tree = read_tree(tree_file);
    char *map_file = option_find_str(options, "map", 0);
    if (map_file) l.map = read_map(map_file);
    char *a = option_find_str(options, "anchors", 0);
    if (a) {
        int len = strlen(a);
        int n = 1;
        int i;
        for (i = 0; i < len; ++i) {
            if (a[i] == ',') ++n;
        }
        for (i = 0; i < n && i < num * 2; ++i) {   /* bound to biases capacity */
            float bias = atof(a);
            l.biases[i] = bias;
            a = strchr(a, ',') + 1;
        }
    }
    return l;
}
// parser.c
// parser.c
/* Parse a comma-separated int list ("0,1,2") into a fresh array and
   store the count in *num.  Returns NULL (leaving *num untouched)
   when a is NULL. */
int *parse_yolo_mask(char *a, int *num)
{
    if (!a) return 0;
    int count = 1;
    int i;
    int len = strlen(a);
    for (i = 0; i < len; ++i) {
        if (a[i] == ',') ++count;
    }
    int *mask = (int *)calloc(count, sizeof(int));
    for (i = 0; i < count; ++i) {
        mask[i] = atoi(a);
        a = strchr(a, ',') + 1;
    }
    *num = count;
    return mask;
}
// parser.c
// parser.c
/* Build a [yolo] layer.  Exits if the previous layer's output size
   does not match what classes/mask imply; anchors are bounded by the
   biases capacity (total*2). */
layer parse_yolo(list *options, size_params params)
{
    int classes = option_find_int(options, "classes", 20);
    int total = option_find_int(options, "num", 1);
    int num = total;
    char *a = option_find_str(options, "mask", 0);
    int *mask = parse_yolo_mask(a, &num);
    int max_boxes = option_find_int_quiet(options, "max", 90);
    layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes, max_boxes);
    if (l.outputs != params.inputs) {
        printf("Error: l.outputs == params.inputs \n");
        printf("filters= in the [convolutional]-layer doesn't correspond to classes= or mask= in [yolo]-layer \n");
        exit(EXIT_FAILURE);
    }
    char *map_file = option_find_str(options, "map", 0);
    if (map_file) l.map = read_map(map_file);
    l.jitter = option_find_float(options, "jitter", .2);
    l.focal_loss = option_find_int_quiet(options, "focal_loss", 0);
    l.ignore_thresh = option_find_float(options, "ignore_thresh", .5);
    l.truth_thresh = option_find_float(options, "truth_thresh", 1);
    l.random = option_find_int_quiet(options, "random", 0);
    a = option_find_str(options, "anchors", 0);
    if (a) {
        int count = 1;
        int i;
        int len = strlen(a);
        for (i = 0; i < len; ++i) {
            if (a[i] == ',') ++count;
        }
        for (i = 0; i < count && i < total * 2; ++i) {
            l.biases[i] = atof(a);
            a = strchr(a, ',') + 1;
        }
    }
    return l;
}
// parser.c
// parser.c
/* Build a [softmax] layer from its options. */
softmax_layer parse_softmax(list *options, size_params params)
{
    int groups = option_find_int_quiet(options, "groups", 1);
    softmax_layer l = make_softmax_layer(params.batch, params.inputs, groups);
    l.temperature = option_find_float_quiet(options, "temperature", 1);
    char *tree_file = option_find_str(options, "tree", 0);
    if (tree_file) l.softmax_tree = read_tree(tree_file);
    return l;
}
// parser.c
// parser.c
/* Build a [maxpool] layer; size defaults to stride, padding to size-1. */
maxpool_layer parse_maxpool(list *options, size_params params)
{
    int stride = option_find_int(options, "stride", 1);
    int size = option_find_int(options, "size", stride);
    int padding = option_find_int_quiet(options, "padding", size - 1);
    int h = params.h;
    int w = params.w;
    int c = params.c;
    int batch = params.batch;
    if (!(h && w && c)) error("Layer before maxpool layer must output image.");
    return make_maxpool_layer(batch, h, w, c, size, stride, padding);
}
// parser.c
// parser.c
/* Build a [reorg] layer from its options. */
layer parse_reorg(list *options, size_params params)
{
    int stride = option_find_int(options, "stride", 1);
    int reverse = option_find_int_quiet(options, "reverse", 0);
    int h = params.h;
    int w = params.w;
    int c = params.c;
    int batch = params.batch;
    if (!(h && w && c)) error("Layer before reorg layer must output image.");
    return make_reorg_layer(batch, w, h, c, stride, reverse);
}
// parser.c
// parser.c
/* Build an [upsample] layer from its options. */
layer parse_upsample(list *options, size_params params, network net)
{
    int stride = option_find_int(options, "stride", 2);
    layer up = make_upsample_layer(params.batch, params.w, params.h, params.c, stride);
    up.scale = option_find_float_quiet(options, "scale", 1);
    return up;
}
// parser.c
layer parse_shortcut(list *options, size_params params, network net)
{
char *l = option_find(options, "from");
int index = atoi(l);
if (index < 0) index = params.index + index;
int batch = params.batch;
layer from = net.layers[index];
layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
char *activation_s = option_find_str(options, "activation", "linear");
ACTIVATION activation = get_activation(activation_s);
s.activation = activation;
return s;
}
// parser.c
// parser.c
/* Build a [route] layer from its comma-separated "layers" option.
   FIX: strlen(l) ran BEFORE the NULL check, so a missing "layers"
   option dereferenced NULL before the intended error fired — the
   check now comes first. */
route_layer parse_route(list *options, size_params params, network net)
{
    char *l = option_find(options, "layers");
    if (!l) error("Route Layer must specify input layers");
    int len = strlen(l);
    int n = 1;
    int i;
    for (i = 0; i < len; ++i) {
        if (l[i] == ',') ++n;
    }
    int *layers = calloc(n, sizeof(int));
    int *sizes = calloc(n, sizeof(int));
    for (i = 0; i < n; ++i) {
        int index = atoi(l);
        l = strchr(l, ',') + 1;
        if (index < 0) index = params.index + index;   /* relative index */
        layers[i] = index;
        sizes[i] = net.layers[index].outputs;
    }
    int batch = params.batch;
    route_layer layer = make_route_layer(batch, n, layers, sizes);
    /* output shape: channels concatenate only when spatial dims match */
    convolutional_layer first = net.layers[layers[0]];
    layer.out_w = first.out_w;
    layer.out_h = first.out_h;
    layer.out_c = first.out_c;
    for (i = 1; i < n; ++i) {
        int index = layers[i];
        convolutional_layer next = net.layers[index];
        if (next.out_w == first.out_w && next.out_h == first.out_h) {
            layer.out_c += next.out_c;
        }
        else {
            layer.out_h = layer.out_w = layer.out_c = 0;
        }
    }
    return layer;
}
// parser.c
// parser.c
/* Release a section: its header string, every kvp node, the options
   list header, and the section struct itself. */
void free_section(section *s)
{
    free(s->type);
    node *n = s->options->front;
    while (n) {
        node *next = n->next;
        kvp *pair = (kvp *)n->val;
        free(pair->key);
        free(pair);
        free(n);
        n = next;
    }
    free(s->options);
    free(s);
}
// parser.c
// parser.c
/* Map a "[section]" header string to its layer type (BLANK if unknown). */
LAYER_TYPE string_to_layer_type(char * type)
{
    if (!strcmp(type, "[yolo]")) return YOLO;
    if (!strcmp(type, "[region]")) return REGION;
    if (!strcmp(type, "[conv]") || !strcmp(type, "[convolutional]")) return CONVOLUTIONAL;
    if (!strcmp(type, "[net]") || !strcmp(type, "[network]")) return NETWORK;
    if (!strcmp(type, "[max]") || !strcmp(type, "[maxpool]")) return MAXPOOL;
    if (!strcmp(type, "[reorg]")) return REORG;
    if (!strcmp(type, "[upsample]")) return UPSAMPLE;
    if (!strcmp(type, "[shortcut]")) return SHORTCUT;
    if (!strcmp(type, "[soft]") || !strcmp(type, "[softmax]")) return SOFTMAX;
    if (!strcmp(type, "[route]")) return ROUTE;
    return BLANK;
}
// parser.c
learning_rate_policy get_policy(char *s)
{
if (strcmp(s, "random") == 0) return RANDOM;
if (strcmp(s, "poly") == 0) return POLY;
if (strcmp(s, "constant") == 0) return CONSTANT;
if (strcmp(s, "step") == 0) return STEP;
if (strcmp(s, "exp") == 0) return EXP;
if (strcmp(s, "sigmoid") == 0) return SIG;
if (strcmp(s, "steps") == 0) return STEPS;
fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
return CONSTANT;
}
// parser.c
void parse_net_options(list *options, network *net)
{
net->batch = option_find_int(options, "batch", 1);
net->learning_rate = option_find_float(options, "learning_rate", .001);
net->momentum = option_find_float(options, "momentum", .9);
net->decay = option_find_float(options, "decay", .0001);
int subdivs = option_find_int(options, "subdivisions", 1);
net->time_steps = option_find_int_quiet(options, "time_steps", 1);
net->batch /= subdivs;
net->batch *= net->time_steps;
net->subdivisions = subdivs;
char *a = option_find_str(options, "input_calibration", 0);
if (a) {
int len = strlen(a);
int n = 1;
int i;
for (i = 0; i < len; ++i) {
if (a[i] == ',') ++n;
}
net->input_calibration_size = n;
net->input_calibration = (float *)calloc(n, sizeof(float));
for (i = 0; i < n; ++i) {
float coef = atof(a);
net->input_calibration[i] = coef;
a = strchr(a, ',') + 1;
}
}
net->adam = option_find_int_quiet(options, "adam", 0);
if (net->adam) {
net->B1 = option_find_float(options, "B1", .9);
net->B2 = option_find_float(options, "B2", .999);
net->eps = option_find_float(options, "eps", .000001);
}
net->h = option_find_int_quiet(options, "height", 0);
net->w = option_find_int_quiet(options, "width", 0);
net->c = option_find_int_quiet(options, "channels", 0);
net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c);
net->max_crop = option_find_int_quiet(options, "max_crop", net->w * 2);
net->min_crop = option_find_int_quiet(options, "min_crop", net->w);
net->angle = option_find_float_quiet(options, "angle", 0);
net->aspect = option_find_float_quiet(options, "aspect", 1);
net->saturation = option_find_float_quiet(options, "saturation", 1);
net->exposure = option_find_float_quiet(options, "exposure", 1);
net->hue = option_find_float_quiet(options, "hue", 0);
if (!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied");
char *policy_s = option_find_str(options, "policy", "constant");
net->policy = get_policy(policy_s);
net->burn_in = option_find_int_quiet(options, "burn_in", 0);
if (net->policy == STEP) {
net->step = option_find_int(options, "step", 1);
net->scale = option_find_float(options, "scale", 1);
}
else if (net->policy == STEPS) {
char *l = option_find(options, "steps");
char *p = option_find(options, "scales");
if (!l || !p) error("STEPS policy must have steps and scales in cfg file");
int len = strlen(l);
int n = 1;
int i;
for (i = 0; i < len; ++i) {
if (l[i] == ',') ++n;
}
int *steps = calloc(n, sizeof(int));
float *scales = calloc(n, sizeof(float));
for (i = 0; i < n; ++i) {
int step = atoi(l);
float scale = atof(p);
l = strchr(l, ',') + 1;
p = strchr(p, ',') + 1;
steps[i] = step;
scales[i] = scale;
}
net->scales = scales;
net->steps = steps;
net->num_steps = n;
}
else if (net->policy == EXP) {
net->gamma = option_find_float(options, "gamma", 1);
}
else if (net->policy == SIG) {
net->gamma = option_find_float(options, "gamma", 1);
net->step = option_find_int(options, "step", 1);
}
else if (net->policy == POLY || net->policy == RANDOM) {
net->power = option_find_float(options, "power", 1);
}
net->max_batches = option_find_int(options, "max_batches", 0);
}
// parser.c
network parse_network_cfg(char *filename, int batch, int quantized)
{
list *sections = read_cfg(filename);
node *n = sections->front;
if (!n) error("Config file has no sections");
network net = make_network(sections->size - 1);
net.quantized = quantized;
net.do_input_calibration = 0;
net.gpu_index = gpu_index;
size_params params;
params.quantized = quantized;
section *s = (section *)n->val;
list *options = s->options;
if (strcmp(s->type, "[net]") == 0 && strcmp(s->type, "[network]") == 0)
error("First section must be [net] or [network]");
parse_net_options(options, &net);
params.h = net.h;
params.w = net.w;
params.c = net.c;
params.inputs = net.inputs;
if (batch > 0) net.batch = batch;
params.batch = net.batch;
params.time_steps = net.time_steps;
params.net = net;
size_t workspace_size = 0;
n = n->next;
int count = 0;
free_section(s);
fprintf(stderr, "layer filters size input output\n");
while (n) {
params.index = count;
fprintf(stderr, "%5d ", count);
s = (section *)n->val;
options = s->options;
layer l = { 0 };
LAYER_TYPE lt = string_to_layer_type(s->type);
if (lt == CONVOLUTIONAL) {
// if(count == 80) params.quantized = 0; // doesn't lost GPU - mAP = 45.61%
node *tmp = n->next;
if(tmp) tmp = tmp->next;
if (tmp)
{
if (string_to_layer_type(((section *)tmp->val)->type) == YOLO) {
params.quantized = 0; // mAP = 53.60%
//printf("\n\n i = %d \n\n", count);
}
}
l = parse_convolutional(options, params);
}
else if (lt == REGION) {
l = parse_region(options, params);
}
else if (lt == YOLO) {
l = parse_yolo(options, params);
}
else if (lt == SOFTMAX) {
l = parse_softmax(options, params);
net.hierarchy = l.softmax_tree;
}
else if (lt == MAXPOOL) {
l = parse_maxpool(options, params);
}
else if (lt == REORG) {
l = parse_reorg(options, params);
}
else if (lt == ROUTE) {
l = parse_route(options, params, net);
}
else if (lt == UPSAMPLE) {
l = parse_upsample(options, params, net);
}
else if (lt == SHORTCUT) {
l = parse_shortcut(options, params, net);
}
else {
fprintf(stderr, "Type not recognized: %s\n", s->type);
}
l.dontload = option_find_int_quiet(options, "dontload", 0);
l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0);
option_unused(options);
net.layers[count] = l;
if (l.workspace_size > workspace_size) workspace_size = l.workspace_size;
free_section(s);
n = n->next;
++count;
if (n) {
params.h = l.out_h;
params.w = l.out_w;
params.c = l.out_c;
params.inputs = l.outputs;
}
}
free_list(sections);
net.outputs = get_network_output_size(net);
net.output = get_network_output(net);
if (workspace_size) {
//printf("%ld\n", workspace_size);
net.workspace = calloc(1, workspace_size);
}
return net;
}
// -------------- gettimeofday for Windows--------------------
#if defined(_MSC_VER)
int gettimeofday(struct timeval *tv, struct timezone *tz)
{
FILETIME ft;
unsigned __int64 tmpres = 0;
static int tzflag;
if (NULL != tv)
{
GetSystemTimeAsFileTime(&ft);
tmpres |= ft.dwHighDateTime;
tmpres <<= 32;
tmpres |= ft.dwLowDateTime;
/*converting file time to unix epoch*/
tmpres -= DELTA_EPOCH_IN_MICROSECS;
tmpres /= 10; /*convert into microseconds*/
tv->tv_sec = (long)(tmpres / 1000000UL);
tv->tv_usec = (long)(tmpres % 1000000UL);
}
if (NULL != tz)
{
if (!tzflag)
{
_tzset();
tzflag++;
}
tz->tz_minuteswest = _timezone / 60;
tz->tz_dsttime = _daylight;
}
return 0;
}
#endif // _MSC_VER
// ------------------------------------------------------
// Calculate mAP and TP/FP/FN, IoU, F1
#include "pthread.h"
//#include "box.h"
/*
// from: box.h
typedef struct {
float x, y, w, h;
} box;
*/
// IoU between two boxes; defined elsewhere (see box.h).
float box_iou(box a, box b);
// What kind of sample a loader thread should produce (only IMAGE_DATA and
// LETTERBOX_DATA are handled by load_thread below).
typedef enum {
    CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, LETTERBOX_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA
} data_type;
// Row-pointer float matrix: vals[row][col], dimensions rows x cols.
typedef struct matrix {
    int rows, cols;
    float **vals;
} matrix;
// A batch of samples: X holds inputs, y holds labels.
// NOTE(review): 'shallow' presumably marks borrowed (not owned) storage --
// confirm against the corresponding free routine.
typedef struct {
    int w, h;            // sample width/height
    matrix X;            // input data
    matrix y;            // labels
    int shallow;
    int *num_boxes;      // per-sample ground-truth box counts
    box **boxes;         // per-sample ground-truth boxes
} data;
// One ground-truth box: class id, center (x, y), size (w, h), plus the
// precomputed edge coordinates (filled in by read_boxes).
typedef struct {
    int id;
    float x, y, w, h;
    float left, right, top, bottom;
} box_label;
// Arguments handed to a data-loading thread (see load_thread /
// load_data_in_thread). Only path/im/resized/w/h/type are used by the
// loader in this file; the remaining fields mirror darknet's full loader.
typedef struct load_args {
    int threads;
    char **paths;
    char *path;          // image file to load
    int n;
    int m;
    char **labels;
    int h;               // target resize height
    int w;               // target resize width
    int out_w;
    int out_h;
    int nh;
    int nw;
    int num_boxes;
    int min, max, size;
    int classes;
    int background;
    int scale;
    int small_object;
    float jitter;
    int flip;
    float angle;
    float aspect;
    float saturation;
    float exposure;
    float hue;
    data *d;
    image *im;           // out: loaded image
    image *resized;      // out: resized copy
    data_type type;      // selects the loader branch
    tree *hierarchy;
} load_args;
// Linear offset into a YOLO layer's output for (batch, anchor, entry, cell).
// 'location' packs anchor*l.w*l.h + cell; 'entry' selects the channel group
// (0..3 = box coords, 4 = objectness, 5.. = class scores, per usage below).
int entry_index(layer l, int batch, int location, int entry)
{
    const int area = l.w * l.h;
    const int anchor = location / area;
    const int cell = location % area;
    return batch * l.outputs + anchor * area * (4 + l.classes + 1) + entry * area + cell;
}
// Count predictions in one YOLO layer whose objectness exceeds 'thresh'.
int yolo_num_detections(layer l, float thresh)
{
    int cell, anchor;
    int total = 0;
    for (anchor = 0; anchor < l.n; ++anchor) {
        for (cell = 0; cell < l.w*l.h; ++cell) {
            const int obj_index = entry_index(l, 0, anchor*l.w*l.h + cell, 4);
            if (l.output[obj_index] > thresh) ++total;
        }
    }
    return total;
}
// Total detection capacity needed across all output layers of the network:
// thresholded count for YOLO layers, full grid (w*h*n) for DETECTION/REGION.
int num_detections(network *net, float thresh)
{
    int idx;
    int total = 0;
    for (idx = 0; idx < net->n; ++idx) {
        const layer l = net->layers[idx];
        switch (l.type) {
        case YOLO:
            total += yolo_num_detections(l, thresh);
            break;
        case DETECTION:
        case REGION:
            total += l.w*l.h*l.n;
            break;
        default:
            break;
        }
    }
    return total;
}
// Allocate the detection array for one forward pass, sized by
// num_detections(); per-detection prob (and mask when coords > 4) arrays are
// allocated using the last layer's class/coord counts. *num receives the
// count when non-NULL.
detection *make_network_boxes(network *net, float thresh, int *num)
{
    const layer last = net->layers[net->n - 1];
    const int nboxes = num_detections(net, thresh);
    int k;
    if (num) *num = nboxes;
    detection *dets = calloc(nboxes, sizeof(detection));
    for (k = 0; k < nboxes; ++k) {
        dets[k].prob = calloc(last.classes, sizeof(float));
        if (last.coords > 4) {
            dets[k].mask = calloc(last.coords - 4, sizeof(float));
        }
    }
    return dets;
}
// Free an array of n detections, including each detection's prob array and
// optional mask array.
void free_detections(detection *dets, int n)
{
    int k;
    for (k = 0; k < n; ++k) {
        detection *d = dets + k;
        free(d->prob);
        if (d->mask) free(d->mask);
    }
    free(dets);
}
// Copy 'str' into 'output' with the FIRST occurrence of 'orig' replaced by
// 'rep'. If 'orig' does not occur, 'str' is copied through unchanged.
// BUG FIX: callers alias input and output (e.g.
// find_replace(labelpath, ".jpg", ".txt", labelpath) below), and the original
// sprintf'ed straight into 'output' while reading 'str' -- overlapping
// source/destination in sprintf is undefined behavior. Build the result in a
// local buffer first so in-place replacement is safe.
void find_replace(char *str, char *orig, char *rep, char *output)
{
    char buffer[4096] = { 0 };
    char result[4096] = { 0 };
    char *p;
    sprintf(buffer, "%s", str);
    p = strstr(buffer, orig);
    if (!p) { // 'orig' not present: pass the input through
        if (output != str) sprintf(output, "%s", str);
        return;
    }
    *p = '\0'; // split at the match: buffer = prefix, p+strlen(orig) = suffix
    sprintf(result, "%s%s%s", buffer, rep, p + strlen(orig));
    sprintf(output, "%s", result);
}
// Map detection boxes from network input coordinates back to original image
// coordinates. 'letter' selects letterbox geometry (aspect-preserving resize
// with padding); when 'relative' is 0 the boxes are scaled to pixel units.
void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative, int letter)
{
    int k;
    int new_w;
    int new_h;
    if (!letter) {
        // plain resize: the image fills the whole network input
        new_w = netw;
        new_h = neth;
    }
    else if (((float)netw / w) < ((float)neth / h)) {
        // width-limited letterbox
        new_w = netw;
        new_h = (h * netw) / w;
    }
    else {
        // height-limited letterbox
        new_h = neth;
        new_w = (w * neth) / h;
    }
    for (k = 0; k < n; ++k) {
        box b = dets[k].bbox;
        // undo the padding offset and rescale to the unpadded region
        b.x = (b.x - (netw - new_w) / 2. / netw) / ((float)new_w / netw);
        b.y = (b.y - (neth - new_h) / 2. / neth) / ((float)new_h / neth);
        b.w *= (float)netw / new_w;
        b.h *= (float)neth / new_h;
        if (!relative) {
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[k].bbox = b;
    }
}
// yolo_layer.c
box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride)
{
box b;
b.x = (i + x[index + 0 * stride]) / lw;
b.y = (j + x[index + 1 * stride]) / lh;
b.w = exp(x[index + 2 * stride]) * biases[2 * n] / w;
b.h = exp(x[index + 3 * stride]) * biases[2 * n + 1] / h;
return b;
}
// yolo_layer.c
// yolo_layer.c
// Extract all predictions above 'thresh' from one YOLO layer into dets[],
// decode their boxes, gate class probabilities by objectness, and remap the
// boxes to image coordinates. Returns the number of detections written.
int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets, int letter)
{
    int i, j, n;
    float *predictions = l.output;
    //if (l.batch == 2) avg_flipped_yolo(l);
    int count = 0;
    for (i = 0; i < l.w*l.h; ++i) {
        int row = i / l.w;
        int col = i % l.w;
        for (n = 0; n < l.n; ++n) {
            // entry 4 holds the objectness score for this anchor/cell
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float objectness = predictions[obj_index];
            //if (objectness <= thresh) continue; // incorrect behavior for Nan values
            if (objectness > thresh) {
                int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
                dets[count].bbox = get_yolo_box(predictions, l.biases, l.mask[n], box_index, col, row, l.w, l.h, netw, neth, l.w*l.h);
                dets[count].objectness = objectness;
                dets[count].classes = l.classes;
                for (j = 0; j < l.classes; ++j) {
                    int class_index = entry_index(l, 0, n*l.w*l.h + i, 4 + 1 + j);
                    // class confidence = objectness * class score; zeroed below thresh
                    float prob = objectness*predictions[class_index];
                    dets[count].prob[j] = (prob > thresh) ? prob : 0;
                }
                ++count;
            }
        }
    }
    correct_yolo_boxes(dets, count, w, h, netw, neth, relative, letter);
    return count;
}
// get prediction boxes: yolov2_forward_network.c
void get_region_boxes_cpu(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map);
// Get region-layer detections via the CPU helper, copy them into dets[]
// (all l.w*l.h*l.n slots, objectness forced to 1), then remap the boxes to
// image coordinates with the YOLO-style correction.
void custom_get_region_detections(layer l, int w, int h, int net_w, int net_h, float thresh, int *map, float hier, int relative, detection *dets, int letter)
{
    box *boxes = calloc(l.w*l.h*l.n, sizeof(box));
    float **probs = calloc(l.w*l.h*l.n, sizeof(float *));
    int i, j;
    // BUG FIX: each probability row stores floats, not float pointers; the
    // original allocated sizeof(float *) per class, over-allocating on
    // 64-bit builds (harmless but wrong, and it hides the intent).
    for (j = 0; j < l.w*l.h*l.n; ++j) probs[j] = calloc(l.classes, sizeof(float));
    get_region_boxes_cpu(l, 1, 1, thresh, probs, boxes, 0, map);
    for (j = 0; j < l.w*l.h*l.n; ++j) {
        dets[j].classes = l.classes;
        dets[j].bbox = boxes[j];
        dets[j].objectness = 1;
        for (i = 0; i < l.classes; ++i) {
            dets[j].prob[i] = probs[j][i];
        }
    }
    free(boxes);
    free_ptrs((void **)probs, l.w*l.h*l.n);
    //correct_region_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative);
    correct_yolo_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative, letter);
}
// Walk every output layer and append its detections to dets[], advancing the
// write cursor by the number of detections each layer produced.
void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets, int letter)
{
    int idx;
    for (idx = 0; idx < net->n; ++idx) {
        const layer l = net->layers[idx];
        if (l.type == YOLO) {
            dets += get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets, letter);
        }
        else if (l.type == REGION) {
            custom_get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets, letter);
            //get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets);
            dets += l.w*l.h*l.n;   // region layers always emit a full grid
        }
    }
}
// Convenience wrapper: allocate the detection set for one forward pass and
// fill it from every output layer. *num receives the detection count.
detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num, int letter)
{
    detection *all = make_network_boxes(net, thresh, num);
    fill_network_boxes(net, w, h, thresh, hier, map, relative, all, letter);
    return all;
}
// Worker entry point for load_data_in_thread: copies its heap-allocated
// load_args, loads/resizes one image according to args.type, and frees the
// argument struct. Unsupported types print a message and wait for a key.
void *load_thread(void *ptr)
{
    load_args args = *(struct load_args*)ptr;
    switch (args.type) {
    case IMAGE_DATA:
        *(args.im) = load_image(args.path, 0, 0, 3);
        *(args.resized) = resize_image(*(args.im), args.w, args.h);
        //printf(" a.path = %s, a.w = %d, a.h = %d \n", a.path, a.w, a.h);
        break;
    case LETTERBOX_DATA:
        printf(" LETTERBOX_DATA isn't implemented \n");
        getchar();
        //*(a.im) = load_image(a.path, 0, 0, 0);
        //*(a.resized) = letterbox_image(*(a.im), a.w, a.h);
        break;
    default:
        printf("unknown DATA type = %d \n", args.type);
        getchar();
        break;
    }
    free(ptr);
    return 0;
}
// Spawn a loader thread for one sample. The args struct is copied to the
// heap; ownership passes to load_thread, which frees it.
pthread_t load_data_in_thread(load_args args)
{
    pthread_t tid;
    struct load_args *copy = calloc(1, sizeof(struct load_args));
    *copy = args;
    if (pthread_create(&tid, 0, load_thread, copy) != 0) error("Thread creation failed");
    return tid;
}
// Read ground-truth boxes ("id x y w h" per line) from 'filename'. Returns a
// heap-allocated array and stores its length in *n; a missing file yields an
// empty (still free-able) array with *n == 0.
box_label *read_boxes(char *filename, int *n)
{
    // IMPROVED: grow the array geometrically. The original realloc'ed once
    // per parsed line, i.e. O(count) reallocations and O(count^2) copying on
    // large label files.
    int capacity = 64;
    box_label *boxes = calloc(capacity, sizeof(box_label));
    FILE *file = fopen(filename, "r");
    if (!file)
    {
        //file_error(filename);
        *n = 0;
        return boxes;
    }
    float x, y, h, w;
    int id;
    int count = 0;
    while (fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5) {
        if (count == capacity) {
            capacity *= 2;
            boxes = realloc(boxes, capacity * sizeof(box_label));
        }
        boxes[count].id = id;
        boxes[count].x = x;
        boxes[count].y = y;
        boxes[count].h = h;
        boxes[count].w = w;
        // precomputed edges used by overlap/IoU computations
        boxes[count].left = x - w / 2;
        boxes[count].right = x + w / 2;
        boxes[count].top = y - h / 2;
        boxes[count].bottom = y + h / 2;
        ++count;
    }
    fclose(file);
    *n = count;
    return boxes;
}
// One scored detection flattened across the whole validation run, used to
// build the precision/recall curve in validate_detector_map.
typedef struct {
    box b;                   // predicted box
    float p;                 // class confidence (sort key)
    int class_id;            // predicted class
    int image_index;         // which validation image produced it
    int truth_flag;          // 1 if matched to a ground-truth box
    int unique_truth_index;  // global index of the matched truth box, else -1
} box_prob;
// qsort comparator for box_prob: descending by confidence p.
int detections_comparator(const void *pa, const void *pb)
{
    const box_prob *a = (const box_prob *)pa;
    const box_prob *b = (const box_prob *)pb;
    if (a->p < b->p) return 1;
    if (a->p > b->p) return -1;
    return 0;
}
int nms_comparator_v3(const void *pa, const void *pb)
{
detection a = *(detection *)pa;
detection b = *(detection *)pb;
float diff = 0;
if (b.sort_class >= 0) {
diff = a.prob[b.sort_class] - b.prob[b.sort_class];
}
else {
diff = a.objectness - b.objectness;
}
if (diff < 0) return 1;
else if (diff > 0) return -1;
return 0;
}
// Greedy per-class non-maximum suppression: for each class, sort detections
// by that class's probability and zero the probability of any lower-ranked
// box whose IoU with a higher-ranked box exceeds 'thresh'.
void do_nms_sort_v3(detection *dets, int total, int classes, float thresh)
{
    int i, j, k;
    k = total - 1;
    // Compact zero-objectness detections to the tail; they are excluded.
    for (i = 0; i <= k; ++i) {
        if (dets[i].objectness == 0) {
            detection swap = dets[i];
            dets[i] = dets[k];
            dets[k] = swap;
            --k;
            --i;   // re-examine the element just swapped into slot i
        }
    }
    total = k + 1;   // active detections only
    for (k = 0; k < classes; ++k) {
        // sort_class tells nms_comparator_v3 which class to order by
        for (i = 0; i < total; ++i) {
            dets[i].sort_class = k;
        }
        qsort(dets, total, sizeof(detection), nms_comparator_v3);
        for (i = 0; i < total; ++i) {
            //printf(" k = %d, \t i = %d \n", k, i);
            if (dets[i].prob[k] == 0) continue;
            box a = dets[i].bbox;
            for (j = i + 1; j < total; ++j) {
                box b = dets[j].bbox;
                if (box_iou(a, b) > thresh) {
                    dets[j].prob[k] = 0;   // suppress overlapping lower-scored box
                }
            }
        }
    }
}
// Run the full mAP evaluation: forward every validation image through the
// network (optionally quantized), match detections against ground-truth
// labels, build per-class PR curves, and print AP per class, mAP, and the
// TP/FP/FN, average IoU and F1 score at 'thresh_calc_avg_iou'.
// NOTE(review): val/val_resized/buf/buf_resized/thr/paths/plist are never
// freed before returning -- tolerable for a one-shot tool, but a leak.
void validate_detector_map(char *datacfg, char *cfgfile, char *weightfile, float thresh_calc_avg_iou, int quantized, int save_params, const float iou_thresh)
{
    // ---- configuration: dataset lists, class names, optional class remap
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "dataset/target.txt");
    char *difficult_valid_images = option_find_str(options, "difficult", NULL);
    char *name_list = option_find_str(options, "names", "yolohw.names");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);
    // ---- network setup (batch=1), optional quantization
    network net = parse_network_cfg(cfgfile, 1, quantized);
    //parse_network_cfg_custom(cfgfile, 1); // set batch=1
    if (weightfile) {
        load_weights_upto_cpu(&net, weightfile, net.n);
    }
    //set_batch_network(&net, 1);
    yolov2_fuse_conv_batchnorm(net);
    if (quantized) {
        do_quantization(net);
        if (save_params) {
            printf("\n Saving quantized parameters... \n\n");
            save_quantized_model(net);
        }
    }
    srand(time(0));
    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);
    char **paths_dif = NULL;
    if (difficult_valid_images) {
        list *plist_dif = get_paths(difficult_valid_images);
        paths_dif = (char **)list_to_array(plist_dif);
    }
    layer l = net.layers[net.n - 1];
    int classes = l.classes;
    int m = plist->size;          // number of validation images
    int i = 0;
    int t;
    const float thresh = .005;    // low threshold: keep nearly all detections for the PR curve
    float nms = .45;
    //const float iou_thresh = 0.5;
    // ---- multi-threaded image loading (pipeline of 'nthreads' loaders)
    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
    load_args args = { 0 };
    args.w = net.w;
    args.h = net.h;
    args.type = IMAGE_DATA;
    //args.type = LETTERBOX_DATA;
    //const float thresh_calc_avg_iou = 0.24;
    // ---- accumulators for the run
    float avg_iou = 0;
    int tp_for_thresh = 0;
    int fp_for_thresh = 0;
    box_prob *detections = calloc(1, sizeof(box_prob));
    int detections_count = 0;
    int unique_truth_count = 0;            // global count of ground-truth boxes seen
    int *truth_classes_count = calloc(classes, sizeof(int));
    // prefetch the first batch of images
    for (t = 0; t < nthreads; ++t) {
        args.path = paths[i + t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    time_t start = time(0);
    for (i = nthreads; i < m + nthreads; i += nthreads) {
        fprintf(stderr, "%d\n", i);
        // collect the images loaded in the previous round
        for (t = 0; t < nthreads && i + t - nthreads < m; ++t) {
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // start loading the next round
        for (t = 0; t < nthreads && i + t < m; ++t) {
            args.path = paths[i + t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        // evaluate the collected images
        for (t = 0; t < nthreads && i + t - nthreads < m; ++t)
        {
#ifndef WIN32
            //if (!quantized) usleep(50000);
            usleep(50000); // 0.005 sec
#endif
            const int image_index = i + t - nthreads;
            char *path = paths[image_index];
            //char *id = basecfg(path);
            float *X = val_resized[t].data;
            //network_predict(net, X);
            if (quantized) {
                network_predict_quantized(net, X); // quantized
                //nms = 0.2;
            }
            else {
                network_predict_cpu(net, X);
            }
            int nboxes = 0;
            int letterbox = (args.type == LETTERBOX_DATA);
            float hier_thresh = 0;
            detection *dets = get_network_boxes(&net, 1, 1, thresh, hier_thresh, 0, 0, &nboxes, letterbox);
            //detection *dets = get_network_boxes(&net, val[t].w, val[t].h, thresh, hier_thresh, 0, 1, &nboxes, letterbox); // for letterbox=1
            if (nms) do_nms_sort_v3(dets, nboxes, l.classes, nms);
            // derive the label-file path from the image path
            char labelpath[4096];
            find_replace(path, "images", "labels", labelpath);
            find_replace(labelpath, "JPEGImages", "labels", labelpath);
            find_replace(labelpath, ".jpg", ".txt", labelpath);
            find_replace(labelpath, ".png", ".txt", labelpath);
            find_replace(labelpath, ".bmp", ".txt", labelpath);
            find_replace(labelpath, ".JPG", ".txt", labelpath);
            find_replace(labelpath, ".JPEG", ".txt", labelpath);
            int num_labels = 0;
            box_label *truth = read_boxes(labelpath, &num_labels);
            //printf(" labelpath = %s \n", labelpath);
            int i, j;   // NOTE: shadows the outer loop counter i
            for (j = 0; j < num_labels; ++j) {
                truth_classes_count[truth[j].id]++;
            }
            // difficult
            // optional "difficult" ground truth: detections matching these
            // are removed rather than counted as false positives
            box_label *truth_dif = NULL;
            int num_labels_dif = 0;
            if (paths_dif)
            {
                char *path_dif = paths_dif[image_index];
                char labelpath_dif[4096];
                find_replace(path_dif, "images", "labels", labelpath_dif);
                find_replace(labelpath_dif, "JPEGImages", "labels", labelpath_dif);
                find_replace(labelpath_dif, ".jpg", ".txt", labelpath_dif);
                find_replace(labelpath_dif, ".JPEG", ".txt", labelpath_dif);
                find_replace(labelpath_dif, ".png", ".txt", labelpath_dif);
                truth_dif = read_boxes(labelpath_dif, &num_labels_dif);
            }
            const int checkpoint_detections_count = detections_count;
            // record every (box, class) pair above the low threshold
            for (i = 0; i < nboxes; ++i) {
                int class_id;
                for (class_id = 0; class_id < classes; ++class_id) {
                    float prob = dets[i].prob[class_id];
                    if (prob > 0) {
                        detections_count++;
                        detections = realloc(detections, detections_count * sizeof(box_prob));
                        detections[detections_count - 1].b = dets[i].bbox;
                        detections[detections_count - 1].p = prob;
                        detections[detections_count - 1].image_index = image_index;
                        detections[detections_count - 1].class_id = class_id;
                        detections[detections_count - 1].truth_flag = 0;
                        detections[detections_count - 1].unique_truth_index = -1;
                        // match against this image's ground truth (same class,
                        // IoU above iou_thresh); keep the best-IoU match
                        int truth_index = -1;
                        float max_iou = 0;
                        for (j = 0; j < num_labels; ++j)
                        {
                            box t = { truth[j].x, truth[j].y, truth[j].w, truth[j].h };
                            //printf(" IoU = %f, prob = %f, class_id = %d, truth[j].id = %d \n",
                            //    box_iou(dets[i].bbox, t), prob, class_id, truth[j].id);
                            float current_iou = box_iou(dets[i].bbox, t);
                            if (current_iou > iou_thresh && class_id == truth[j].id) {
                                if (current_iou > max_iou) {
                                    max_iou = current_iou;
                                    truth_index = unique_truth_count + j;   // global truth index
                                }
                            }
                        }
                        // best IoU
                        if (truth_index > -1) {
                            detections[detections_count - 1].truth_flag = 1;
                            detections[detections_count - 1].unique_truth_index = truth_index;
                        }
                        else {
                            // if object is difficult then remove detection
                            for (j = 0; j < num_labels_dif; ++j) {
                                box t = { truth_dif[j].x, truth_dif[j].y, truth_dif[j].w, truth_dif[j].h };
                                float current_iou = box_iou(dets[i].bbox, t);
                                if (current_iou > iou_thresh && class_id == truth_dif[j].id) {
                                    --detections_count;   // drop the entry just appended
                                    break;
                                }
                            }
                        }
                        // calc avg IoU, true-positives, false-positives for required Threshold
                        if (prob > thresh_calc_avg_iou) {
                            int z, found = 0;
                            // only the first detection of a truth box counts as TP
                            for (z = checkpoint_detections_count; z < detections_count - 1; ++z)
                                if (detections[z].unique_truth_index == truth_index) {
                                    found = 1; break;
                                }
                            if (truth_index > -1 && found == 0) {
                                avg_iou += max_iou;
                                ++tp_for_thresh;
                            }
                            else
                                fp_for_thresh++;
                        }
                    }
                }
            }
            unique_truth_count += num_labels;
            free_detections(dets, nboxes);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    if ((tp_for_thresh + fp_for_thresh) > 0) {
        avg_iou = avg_iou / (tp_for_thresh + fp_for_thresh);
    }
    // SORT(detections)
    // sort all detections of the whole run by descending confidence
    qsort(detections, detections_count, sizeof(box_prob), detections_comparator);
    typedef struct {
        double precision;
        double recall;
        int tp, fp, fn;
    } pr_t;
    // for PR-curve
    pr_t **pr = calloc(classes, sizeof(pr_t*));
    for (i = 0; i < classes; ++i) {
        pr[i] = calloc(detections_count, sizeof(pr_t));
    }
    printf("detections_count = %d, unique_truth_count = %d  \n", detections_count, unique_truth_count);
    // sweep the ranked detections, accumulating per-class TP/FP at each rank
    int *truth_flags = calloc(unique_truth_count, sizeof(int));
    int rank;
    for (rank = 0; rank < detections_count; ++rank) {
        if (rank % 100 == 0)
            printf(" rank = %d of ranks = %d \r", rank, detections_count);
        if (rank > 0) {
            // carry forward cumulative counts from the previous rank
            int class_id;
            for (class_id = 0; class_id < classes; ++class_id) {
                pr[class_id][rank].tp = pr[class_id][rank - 1].tp;
                pr[class_id][rank].fp = pr[class_id][rank - 1].fp;
            }
        }
        box_prob d = detections[rank];
        // if (detected && isn't detected before)
        if (d.truth_flag == 1) {
            if (truth_flags[d.unique_truth_index] == 0)
            {
                truth_flags[d.unique_truth_index] = 1;
                pr[d.class_id][rank].tp++; // true-positive
            }
        }
        else {
            pr[d.class_id][rank].fp++; // false-positive
        }
        for (i = 0; i < classes; ++i)
        {
            const int tp = pr[i][rank].tp;
            const int fp = pr[i][rank].fp;
            const int fn = truth_classes_count[i] - tp; // false-negative = objects - true-positive
            pr[i][rank].fn = fn;
            if ((tp + fp) > 0) pr[i][rank].precision = (double)tp / (double)(tp + fp);
            else pr[i][rank].precision = 0;
            if ((tp + fn) > 0) pr[i][rank].recall = (double)tp / (double)(tp + fn);
            else pr[i][rank].recall = 0;
        }
    }
    free(truth_flags);
    // ---- 11-point interpolated AP per class (PASCAL VOC style)
    double mean_average_precision = 0;
    int unincluded_classes = 0; // To prevent the AP of unincluded classes from spoiling the mAP
    for (i = 0; i < classes; ++i) {
        double avg_precision = 0;
        int point;
        if (truth_classes_count[i] == 0) unincluded_classes++;
        for (point = 0; point < 11; ++point) {
            double cur_recall = point * 0.1;
            double cur_precision = 0;
            // max precision over all ranks whose recall reaches cur_recall
            for (rank = 0; rank < detections_count; ++rank)
            {
                if (pr[i][rank].recall >= cur_recall) {    // > or >=
                    if (pr[i][rank].precision > cur_precision) {
                        cur_precision = pr[i][rank].precision;
                    }
                }
            }
            //printf("class_id = %d, point = %d, cur_recall = %.4f, cur_precision = %.4f \n", i, point, cur_recall, cur_precision);
            avg_precision += cur_precision;
        }
        avg_precision = avg_precision / 11;
        printf("class_id = %d, name = %s, \t ap = %2.2f %% \n", i, names[i], avg_precision * 100);
        mean_average_precision += avg_precision;
    }
    // ---- summary metrics at the requested confidence threshold
    const float cur_precision = (float)tp_for_thresh / ((float)tp_for_thresh + (float)fp_for_thresh);
    const float cur_recall = (float)tp_for_thresh / ((float)tp_for_thresh + (float)(unique_truth_count - tp_for_thresh));
    const float f1_score = 2.F * cur_precision * cur_recall / (cur_precision + cur_recall);
    printf(" for thresh = %1.2f, precision = %1.2f, recall = %1.2f, F1-score = %1.2f \n",
        thresh_calc_avg_iou, cur_precision, cur_recall, f1_score);
    printf(" for thresh = %0.2f, TP = %d, FP = %d, FN = %d, average IoU = %2.2f %% \n",
        thresh_calc_avg_iou, tp_for_thresh, fp_for_thresh, unique_truth_count - tp_for_thresh, avg_iou * 100);
    mean_average_precision = mean_average_precision / (classes-unincluded_classes);
    if (iou_thresh == 0.5) {
        printf("\n mean average precision (mAP) = %f, or %2.2f %% \n", mean_average_precision, mean_average_precision * 100);
    }
    else {
        printf("\n average precision (AP) = %f, or %2.2f %% for IoU threshold = %f \n", mean_average_precision, mean_average_precision * 100, iou_thresh);
    }
    for (i = 0; i < classes; ++i) {
        free(pr[i]);
    }
    free(pr);
    free(detections);
    free(truth_classes_count);
    fprintf(stderr, "Total Detection Time: %f Seconds\n", (double)(time(0) - start));
    //getchar();
}
GB_binop__div_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__div_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__div_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__div_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_fc32)
// A*D function (colscale): GB (_AxD__div_fc32)
// D*A function (rowscale): GB (_DxB__div_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__div_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__div_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_fc32)
// C=scalar+B GB (_bind1st__div_fc32)
// C=scalar+B' GB (_bind1st_tran__div_fc32)
// C=A+scalar GB (_bind2nd__div_fc32)
// C=A'+scalar GB (_bind2nd_tran__div_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_div (aij, bij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC32_div (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_FC32 || GxB_NO_DIV_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Auto-generated kernel (do not edit by hand): C += A+B where all three
// matrices are dense; the loop body comes from the shared template with the
// operator macros defined above (complex float divide).
void GB (_Cdense_ewise3_accum__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated kernel: C = A+B, all three matrices dense. Returns
// GrB_NO_VALUE when this operator/type combination is compiled out
// (GB_DISABLE), so the caller falls back to the generic path.
GrB_Info GB (_Cdense_ewise3_noaccum__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: C += B, accumulating a sparse matrix into a dense
// matrix, parallelized over the B_ek_slicing task partition.
GrB_Info GB (_Cdense_accumB__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: C += b, accumulating one scalar into a dense matrix.
// The second "return (GrB_SUCCESS)" is unreachable (the block above already
// returns); it is a known artifact of the code generator, kept verbatim.
GrB_Info GB (_Cdense_accumb__div_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: C = A*D, scaling each column of A by the matching
// diagonal entry of D.
GrB_Info GB (_AxD__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Each row i of B is scaled by the i-th diagonal entry of D; the per-entry
// op is the fc32 DIV operator.
GrB_Info GB (_DxB__div_fc32)
(
GrB_Matrix C,                               // output matrix
const GrB_Matrix D, bool D_is_pattern,      // diagonal scaling matrix
const GrB_Matrix B, bool B_is_pattern,      // B; pattern-only if flagged
int nthreads                                // # of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
// the template writes results directly into C->x
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Set union of A and B with the fc32 DIV operator applied where both
// entries are present. The optional mask M may be structural and/or
// complemented. C_to_M/C_to_A/C_to_B map C's vectors back to M/A/B, and
// the work has been pre-split into TaskList (C_ntasks tasks, C_nthreads
// threads).
GrB_Info GB (_AaddB__div_fc32)
(
GrB_Matrix C,
const int C_sparsity,               // sparsity structure chosen for C
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // use M's structure, not its values
const bool Mask_comp,               // use !M instead of M
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,                // C's hyperlist equals M's hyperlist
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
// scratch slicings allocated on demand by the template, freed below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// Set intersection of A and B with the fc32 DIV operator; method 08 of
// GB_emult, used when the result C is sparse or hypersparse.
GrB_Info GB (_AemultB_08__div_fc32)
(
GrB_Matrix C,
const int C_sparsity,               // sparsity structure chosen for C
const int ewise_method,             // which emult variant was selected
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // use M's structure, not its values
const bool Mask_comp,               // use !M instead of M
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Method 02 of GB_emult: iterate over the entries of the sparse A and look
// up the matching entries of the bitmap/full B. Because the roles of A and
// B may have been swapped by the caller, flipxy selects whether the binary
// op is applied as f(x,y) or f(y,x); DIV is not commutative, so this
// matters here.
GrB_Info GB (_AemultB_02__div_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // use M's structure, not its values
const bool Mask_comp,               // use !M instead of M
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,                  // if true apply f(y,x) instead of f(x,y)
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Method 04 of GB_emult: iterate over the sparse mask M (sliced into
// M_ntasks tasks) and apply the fc32 DIV operator to the corresponding
// entries of the bitmap/full A and B.
GrB_Info GB (_AemultB_04__div_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,                 // mask, sparse or hypersparse
const bool Mask_struct,             // use M's structure, not its values
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Bitmap-result variant of eWiseMult with the fc32 DIV operator; the mask
// (if any) has been sliced into M_ntasks tasks for M_nthreads threads.
GrB_Info GB (_AemultB_bitmap__div_fc32)
(
GrB_Matrix C,
const int ewise_method,             // which emult variant was selected
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // use M's structure, not its values
const bool Mask_comp,               // use !M instead of M
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// The scalar x is bound as the FIRST operand: Cx [k] = x / Bx [k] for every
// entry present in B (Bb is B's bitmap, or NULL if B is full).
GrB_Info GB (_bind1st__div_fc32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs; no restrict, since Cx may alias Bx
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only entries present in B's bitmap are computed
if (GBB (Bb, k))
{
GxB_FC32_t bij = GBX (Bx, k, false) ;
Cx [k] = GB_FC32_div (x, bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// The scalar y is bound as the SECOND operand: Cx [k] = Ax [k] / y for every
// entry present in A (Ab is A's bitmap, or NULL if A is full).
GrB_Info GB (_bind2nd__div_fc32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs; no restrict, since Cx may alias Ax
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only entries present in A's bitmap are computed
if (GBB (Ab, k))
{
GxB_FC32_t aij = GBX (Ax, k, false) ;
Cx [k] = GB_FC32_div (aij, y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it writes one transposed
// entry of C as x / aij.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_div (x, aij) ; \
}

GrB_Info GB (_bind1st_tran__div_fc32)
(
GrB_Matrix C,
const GB_void *x_input,             // scalar bound as the first operand
const GrB_Matrix A,                 // matrix being transposed
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (preprocessor-only; this
// sits after the returns on purpose)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it writes one transposed
// entry of C as aij / y.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_div (aij, y) ; \
}

GrB_Info GB (_bind2nd_tran__div_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,                 // matrix being transposed
const GB_void *y_input,             // scalar bound as the second operand
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bench.c | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
#include <omp.h>
#ifdef _MPI
#include <mpi.h>
#endif
//------------------------------------------------------------------------------------------------------------------------------
#define irho 0
#define imx 1
#define imy 2
#define imz 3
#define iene 4
#define qu 1
#define qv 2
#define qw 3
#define qpres 4
#define ALP 0.8
#define BET -0.2
#define GAM 0.038095238095238 // 4.0/105.0
#define DEL -0.003571428571429 // -1.0/280.0
#define JBlockSize 11
//------------------------------------------------------------------------------------------------------------------------------
#include "timer.h"
#ifndef BL_NOBENCHMAIN
#include "FakeWriteMultifab.h"
#endif
//------------------------------------------------------------------------------------------------------------------------------
uint64_t _total_run_time;
uint64_t _total_time_hypterm;
uint64_t _total_time_hypterm_L1;
uint64_t _total_time_hypterm_L2;
uint64_t _total_time_hypterm_L3;
double frequency = -1.0;
//------------------------------------------------------------------------------------------------------------------------------
// Calibrate the cycle counter: sleep for one second and record the elapsed
// cycle count as the timer frequency (cycles per second), stored in the
// global `frequency` that the timing printouts divide by.
// NOTE(review): sleep() is declared in <unistd.h>, which is not included
// here directly — presumably pulled in via timer.h; confirm.
void init_timer() {
uint64_t t0 = CycleTime();
sleep(1);
uint64_t t1 = CycleTime();
frequency = (double)(t1-t0);
}
//------------------------------------------------------------------------------------------------------------------------------
void init_data(int lo[3], int hi[3], int fablo[3], int fabhi[3], int ng, double dx[3],
double ** __restrict__ _cons, double ** __restrict__ _q)
{
int ncomp = 5;
int c;
int lo0=fablo[0]; int hi0=fabhi[0];
int lo1=fablo[1]; int hi1=fabhi[1];
int lo2=fablo[2]; int hi2=fabhi[2];
int pencil = (hi0-lo0+1);
int plane = (hi1-lo1+1)*pencil;
/* we have to initialize the entire arrays, including ghost cells */
double * __restrict__ cons[5]; for(c=0;c<5;c++) { cons[c] = _cons[c]; }
double * __restrict__ q[5]; for(c=0;c<6;c++) { q[c] = _q[c]; }
int i, j, k;
double scale, xloc, yloc, zloc, rholoc, uvel, vvel, wvel, eloc;
double GAMMA, CV, CVinv, rhoinv;
scale = 1.0e0;
for(k=lo2;k<=hi2;k++){
zloc = ((double) k)*dx[2]/scale;
for(j=lo1;j<=hi1;j++){
yloc = ((double) j)*dx[1]/scale;
for(i=lo0;i<=hi0;i++){
xloc = ((double) i)*dx[0]/scale;
uvel = 1.1e4*sin(1.0*xloc)*sin(2.0*yloc)*sin(3.0*zloc);
vvel = 1.0e4*sin(2.0*xloc)*sin(4.0*yloc)*sin(1.0*zloc);
wvel = 1.2e4*sin(3.0*xloc)*cos(2.0*yloc)*sin(2.0*zloc);
rholoc = 1.0e-3 + 1.0e-5*sin(1.0*xloc)*cos(2.0*yloc)*cos(3.0*zloc);
eloc = 2.5e9 + 1.0e-3*sin(2.0*xloc)*cos(2.0*yloc)*sin(2.0*zloc);
int ijk = (i-fablo[0]) + (j-fablo[1])*pencil + (k-fablo[2])*plane;
cons[irho][ijk] = rholoc;
cons[imx][ijk] = rholoc*uvel;
cons[imy][ijk] = rholoc*vvel;
cons[imz][ijk] = rholoc*wvel;
cons[iene][ijk] = rholoc*(eloc + (uvel*uvel+vvel*vvel+wvel*wvel)/2.0);
}
}
}
GAMMA = 1.4e0;
CV = 8.3333333333e6;
CVinv = 1.0e0 / CV;
for(k=lo2;k<=hi2;k++){
for(j=lo1;j<=hi1;j++){
for(i=lo0;i<=hi0;i++){
int ijk = (i-fablo[0]) + (j-fablo[1])*pencil + (k-fablo[2])*plane;
rhoinv = 1.0e0/cons[irho][ijk];
q[irho][ijk] = cons[irho][ijk];
q[imx][ijk] = cons[imx][ijk] * rhoinv;
q[imy][ijk] = cons[imy][ijk] * rhoinv;
q[imz][ijk] = cons[imz][ijk] * rhoinv;
eloc = cons[iene][ijk]*rhoinv -
0.5e0*((q[imx][ijk]*q[imx][ijk]) +
(q[imy][ijk]*q[imy][ijk]) +
(q[imz][ijk]*q[imz][ijk]));
q[iene][ijk] = (GAMMA-1.0e0)*eloc*cons[irho][ijk];
q[iene+1][ijk] = eloc * CVinv;
}
}
}
}
//------------------------------------------------------------------------------------------------------------------------------
// Compute the hyperbolic flux divergence for the CNS benchmark using an
// 8th-order centered difference (coefficients ALP/BET/GAM/DEL) in each of
// the three directions. The three direction sweeps are timed separately
// (_total_time_hypterm_L1/L2/L3). cons/q carry ng ghost cells per side;
// flux has none. Each sweep subtracts its directional contribution from
// flux, so flux accumulates all three directions.
void hypterm_naive(int lo[3], int hi[3], int ng, double dx[3], double ** __restrict__ _cons, double ** __restrict__ _q, double ** __restrict__ _flux){ // (lo,hi,ng,dx,cons,q,flux)
// double precision, intent(in ) :: cons(-ng+lo(1):hi(1)+ng,-ng+lo(2):hi(2)+ng,-ng+lo(3):hi(3)+ng,5)
// double precision, intent(in ) :: q(-ng+lo(1):hi(1)+ng,-ng+lo(2):hi(2)+ng,-ng+lo(3):hi(3)+ng,6)
// double precision, intent(out) :: flux(lo(1):hi(1),lo(2):hi(2),lo(3):hi(3),5)
int c;
double dmin[5], dmax[5];
double dxinv0=1.0/dx[0];
double dxinv1=1.0/dx[1];
double dxinv2=1.0/dx[2];
int lo0=lo[0];int hi0=hi[0];
int lo1=lo[1];int hi1=hi[1];
int lo2=lo[2];int hi2=hi[2];
// strides for the ghost-free flux arrays ...
int pencil = (hi0-lo0+1);
int plane = ((hi1-lo1+1))*pencil;
// ... and for the ghosted cons/q arrays
int pencilg = (hi0-lo0+1)+2*ng;
int planeg = ((hi1-lo1+1)+2*ng)*pencilg;
// local aliases, offset so that index 0 is the first interior cell
double * __restrict__ cons[5];for(c=0;c<5;c++){cons[c] = _cons[c] + (ng+ng*pencilg+ng*planeg);}
double * __restrict__ q[6];for(c=0;c<6;c++){ q[c] = _q[c] + (ng+ng*pencilg+ng*planeg);}
double * __restrict__ flux[5];for(c=0;c<5;c++){flux[c] = _flux[c];}
int i,j,k;
int jb,JBlocks;
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// per-sweep iteration counters (sanity check, printed at the end)
int L1iters = 0;
int L2iters = 0;
int L3iters = 0;
if(frequency < 0.0) {
init_timer();
}
uint64_t _time_start = CycleTime();
// ---- sweep 1: x-direction (unit stride); all five flux components in one
// fused loop body ----
#pragma omp parallel for private(i,j,k) reduction(+ : L1iters)
for(k=lo2;k<=hi2;k++){
for(j=lo1;j<=hi1;j++){
for(i=lo0;i<=hi0;i++){
++L1iters;
int ijk = i + j*pencil + k*plane;
int ijkg = i + j*pencilg + k*planeg;
double qum4 = q[qu][ijkg-4];
double qum3 = q[qu][ijkg-3];
double qum2 = q[qu][ijkg-2];
double qum1 = q[qu][ijkg-1];
double qup1 = q[qu][ijkg+1];
double qup2 = q[qu][ijkg+2];
double qup3 = q[qu][ijkg+3];
double qup4 = q[qu][ijkg+4];
flux[irho][ijk] =
- dxinv0*( ALP*( cons[ imx][ijkg+1] - cons[ imx][ijkg-1] ) +
BET*( cons[ imx][ijkg+2] - cons[ imx][ijkg-2] ) +
GAM*( cons[ imx][ijkg+3] - cons[ imx][ijkg-3] ) +
DEL*( cons[ imx][ijkg+4] - cons[ imx][ijkg-4] ) );
flux[imx][ijk] =
- dxinv0*( ALP*( cons[ imx][ijkg+1]*qup1 - cons[ imx][ijkg-1]*qum1 + q[qpres][ijkg+1] - q[qpres][ijkg-1] ) +
BET*( cons[ imx][ijkg+2]*qup2 - cons[ imx][ijkg-2]*qum2 + q[qpres][ijkg+2] - q[qpres][ijkg-2] ) +
GAM*( cons[ imx][ijkg+3]*qup3 - cons[ imx][ijkg-3]*qum3 + q[qpres][ijkg+3] - q[qpres][ijkg-3] ) +
DEL*( cons[ imx][ijkg+4]*qup4 - cons[ imx][ijkg-4]*qum4 + q[qpres][ijkg+4] - q[qpres][ijkg-4] ) );
flux[imy][ijk] =
- dxinv0*( ALP*( cons[ imy][ijkg+1]*qup1 - cons[ imy][ijkg-1]*qum1 ) +
BET*( cons[ imy][ijkg+2]*qup2 - cons[ imy][ijkg-2]*qum2 ) +
GAM*( cons[ imy][ijkg+3]*qup3 - cons[ imy][ijkg-3]*qum3 ) +
DEL*( cons[ imy][ijkg+4]*qup4 - cons[ imy][ijkg-4]*qum4 ) );
flux[imz][ijk] =
- dxinv0*( ALP*( cons[ imz][ijkg+1]*qup1 - cons[ imz][ijkg-1]*qum1 ) +
BET*( cons[ imz][ijkg+2]*qup2 - cons[ imz][ijkg-2]*qum2 ) +
GAM*( cons[ imz][ijkg+3]*qup3 - cons[ imz][ijkg-3]*qum3 ) +
DEL*( cons[ imz][ijkg+4]*qup4 - cons[ imz][ijkg-4]*qum4 ) );
flux[iene][ijk] =
- dxinv0*( ALP*( cons[iene][ijkg+1]*qup1 - cons[iene][ijkg-1]*qum1 + q[qpres][ijkg+1]*qup1 - q[qpres][ijkg-1]*qum1 ) +
BET*( cons[iene][ijkg+2]*qup2 - cons[iene][ijkg-2]*qum2 + q[qpres][ijkg+2]*qup2 - q[qpres][ijkg-2]*qum2 ) +
GAM*( cons[iene][ijkg+3]*qup3 - cons[iene][ijkg-3]*qum3 + q[qpres][ijkg+3]*qup3 - q[qpres][ijkg-3]*qum3 ) +
DEL*( cons[iene][ijkg+4]*qup4 - cons[iene][ijkg-4]*qum4 + q[qpres][ijkg+4]*qup4 - q[qpres][ijkg-4]*qum4 ) );
}}}
uint64_t _time_L1 = CycleTime();
_total_time_hypterm_L1 += (_time_L1 - _time_start);
// NOTE(review): this min/max pass only covers the x-direction partial
// fluxes and its results are overwritten by the identical pass at the end
// of the function before anything is printed — it appears to be dead work
// (perhaps kept to defeat dead-code elimination); confirm before removing.
for(c = 0; c < 5; ++c) {
dmin[c] = flux[c][0];
dmax[c] = flux[c][0];
}
for(k=lo2;k<=hi2;k++){
for(j=lo1;j<=hi1;j++){
for(i=lo0;i<=hi0;i++){
int ijk = i + j*pencil + k*plane;
for(c = 0; c < 5; ++c) {
dmin[c] = fmin(dmin[c], flux[c][ijk]);
dmax[c] = fmax(dmax[c], flux[c][ijk]);
}
}
}
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// ---- sweep 2: y-direction (stride = pencilg); split into one i-loop per
// flux component. NOTE(review): the qvp1..qvm4 loads are unused in the
// flux[irho] and flux[imx]-style loops that don't need all of them; the
// compiler presumably drops the dead loads.
int pencilg2 = pencilg*2;
int pencilg3 = pencilg*3;
int pencilg4 = pencilg*4;
#pragma omp parallel for private(i,j,k) reduction(+ : L2iters)
for(k=lo2;k<=hi2;k++){
for(j=lo1;j<=hi1;j++){
for(i=lo0;i<=hi0;i++){
++L2iters;
int ijk = i + j*pencil + k*plane;
int ijkg = i + j*pencilg + k*planeg;
double qvp1 = q[qv][ijkg+pencilg ];
double qvp2 = q[qv][ijkg+pencilg2 ];
double qvp3 = q[qv][ijkg+pencilg3 ];
double qvp4 = q[qv][ijkg+pencilg4 ];
double qvm1 = q[qv][ijkg-pencilg ];
double qvm2 = q[qv][ijkg-pencilg2 ];
double qvm3 = q[qv][ijkg-pencilg3 ];
double qvm4 = q[qv][ijkg-pencilg4 ];
flux[irho][ijk] = flux[irho][ijk]
- dxinv1*( ALP*( cons[ imy][ijkg+pencilg ] - cons[ imy][ijkg-pencilg ] ) +
BET*( cons[ imy][ijkg+pencilg2 ] - cons[ imy][ijkg-pencilg2 ] ) +
GAM*( cons[ imy][ijkg+pencilg3 ] - cons[ imy][ijkg-pencilg3 ] ) +
DEL*( cons[ imy][ijkg+pencilg4 ] - cons[ imy][ijkg-pencilg4 ] ) );
}
for(i=lo0;i<=hi0;i++){
int ijk = i + j*pencil + k*plane;
int ijkg = i + j*pencilg + k*planeg;
double qvp1 = q[qv][ijkg+pencilg ];
double qvp2 = q[qv][ijkg+pencilg2 ];
double qvp3 = q[qv][ijkg+pencilg3 ];
double qvp4 = q[qv][ijkg+pencilg4 ];
double qvm1 = q[qv][ijkg-pencilg ];
double qvm2 = q[qv][ijkg-pencilg2 ];
double qvm3 = q[qv][ijkg-pencilg3 ];
double qvm4 = q[qv][ijkg-pencilg4 ];
flux[imx][ijk] = flux[imx][ijk]
- dxinv1*( ALP*( cons[ imx][ijkg+pencilg ]*qvp1 - cons[ imx][ijkg-pencilg ]*qvm1 ) +
BET*( cons[ imx][ijkg+pencilg2 ]*qvp2 - cons[ imx][ijkg-pencilg2 ]*qvm2 ) +
GAM*( cons[ imx][ijkg+pencilg3 ]*qvp3 - cons[ imx][ijkg-pencilg3 ]*qvm3 ) +
DEL*( cons[ imx][ijkg+pencilg4 ]*qvp4 - cons[ imx][ijkg-pencilg4 ]*qvm4 ) );
}
for(i=lo0;i<=hi0;i++){
int ijk = i + j*pencil + k*plane;
int ijkg = i + j*pencilg + k*planeg;
double qvp1 = q[qv][ijkg+pencilg ];
double qvp2 = q[qv][ijkg+pencilg2 ];
double qvp3 = q[qv][ijkg+pencilg3 ];
double qvp4 = q[qv][ijkg+pencilg4 ];
double qvm1 = q[qv][ijkg-pencilg ];
double qvm2 = q[qv][ijkg-pencilg2 ];
double qvm3 = q[qv][ijkg-pencilg3 ];
double qvm4 = q[qv][ijkg-pencilg4 ];
flux[imy][ijk] = flux[imy][ijk]
- dxinv1*( ALP*( cons[ imy][ijkg+pencilg ]*qvp1 - cons[ imy][ijkg-pencilg ]*qvm1 + q[qpres][ijkg+pencilg ] - q[qpres][ijkg-pencilg ] ) +
BET*( cons[ imy][ijkg+pencilg2 ]*qvp2 - cons[ imy][ijkg-pencilg2 ]*qvm2 + q[qpres][ijkg+pencilg2 ] - q[qpres][ijkg-pencilg2 ] ) +
GAM*( cons[ imy][ijkg+pencilg3 ]*qvp3 - cons[ imy][ijkg-pencilg3 ]*qvm3 + q[qpres][ijkg+pencilg3 ] - q[qpres][ijkg-pencilg3 ] ) +
DEL*( cons[ imy][ijkg+pencilg4 ]*qvp4 - cons[ imy][ijkg-pencilg4 ]*qvm4 + q[qpres][ijkg+pencilg4 ] - q[qpres][ijkg-pencilg4 ] ) );
}
for(i=lo0;i<=hi0;i++){
int ijk = i + j*pencil + k*plane;
int ijkg = i + j*pencilg + k*planeg;
double qvp1 = q[qv][ijkg+pencilg ];
double qvp2 = q[qv][ijkg+pencilg2 ];
double qvp3 = q[qv][ijkg+pencilg3 ];
double qvp4 = q[qv][ijkg+pencilg4 ];
double qvm1 = q[qv][ijkg-pencilg ];
double qvm2 = q[qv][ijkg-pencilg2 ];
double qvm3 = q[qv][ijkg-pencilg3 ];
double qvm4 = q[qv][ijkg-pencilg4 ];
flux[imz][ijk] = flux[imz][ijk]
- dxinv1*( ALP*( cons[ imz][ijkg+pencilg ]*qvp1 - cons[ imz][ijkg-pencilg ]*qvm1 ) +
BET*( cons[ imz][ijkg+pencilg2 ]*qvp2 - cons[ imz][ijkg-pencilg2 ]*qvm2 ) +
GAM*( cons[ imz][ijkg+pencilg3 ]*qvp3 - cons[ imz][ijkg-pencilg3 ]*qvm3 ) +
DEL*( cons[ imz][ijkg+pencilg4 ]*qvp4 - cons[ imz][ijkg-pencilg4 ]*qvm4 ) );
}
for(i=lo0;i<=hi0;i++){
int ijk = i + j*pencil + k*plane;
int ijkg = i + j*pencilg + k*planeg;
double qvp1 = q[qv][ijkg+pencilg ];
double qvp2 = q[qv][ijkg+pencilg2 ];
double qvp3 = q[qv][ijkg+pencilg3 ];
double qvp4 = q[qv][ijkg+pencilg4 ];
double qvm1 = q[qv][ijkg-pencilg ];
double qvm2 = q[qv][ijkg-pencilg2 ];
double qvm3 = q[qv][ijkg-pencilg3 ];
double qvm4 = q[qv][ijkg-pencilg4 ];
flux[iene][ijk] = flux[iene][ijk]
- dxinv1*( ALP*( cons[iene][ijkg+pencilg ]*qvp1 - cons[iene][ijkg-pencilg ]*qvm1 + q[qpres][ijkg+pencilg ]*qvp1 - q[qpres][ijkg-pencilg ]*qvm1 ) +
BET*( cons[iene][ijkg+pencilg2 ]*qvp2 - cons[iene][ijkg-pencilg2 ]*qvm2 + q[qpres][ijkg+pencilg2 ]*qvp2 - q[qpres][ijkg-pencilg2 ]*qvm2 ) +
GAM*( cons[iene][ijkg+pencilg3 ]*qvp3 - cons[iene][ijkg-pencilg3 ]*qvm3 + q[qpres][ijkg+pencilg3 ]*qvp3 - q[qpres][ijkg-pencilg3 ]*qvm3 ) +
DEL*( cons[iene][ijkg+pencilg4 ]*qvp4 - cons[iene][ijkg-pencilg4 ]*qvm4 + q[qpres][ijkg+pencilg4 ]*qvp4 - q[qpres][ijkg-pencilg4 ]*qvm4 ) );
}
}}
uint64_t _time_L2 = CycleTime();
_total_time_hypterm_L2 += (_time_L2 - _time_L1);
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// ---- sweep 3: z-direction (stride = planeg); j is blocked in chunks of
// JBlockSize and the blocks are distributed round-robin to threads
// (schedule(static,1)) ----
int planeg2 = planeg*2;
int planeg3 = planeg*3;
int planeg4 = planeg*4;
JBlocks = (hi1-lo1+1 + JBlockSize-1 )/JBlockSize;
#pragma omp parallel for private(i,j,k,jb) schedule(static,1) reduction(+ : L3iters)
for(jb=0;jb<JBlocks;jb++){
for(k=lo2;k<=hi2;k++){
for(j=jb*JBlockSize;j<(jb+1)*JBlockSize;j++)if(j<=hi1){
//#pragma omp parallel for private(i,j,k) schedule(static,1)
//for(k=lo2;k<hi2;k++){
// for(j=lo1;j<hi1;j++){
for(i=lo0;i<=hi0;i++){
++L3iters;
int ijk = i + j*pencil + k*plane;
int ijkg = i + j*pencilg + k*planeg;
double qwp1 = q[qw][ijkg+planeg ];
double qwp2 = q[qw][ijkg+planeg2];
double qwp3 = q[qw][ijkg+planeg3];
double qwp4 = q[qw][ijkg+planeg4];
double qwm1 = q[qw][ijkg-planeg ];
double qwm2 = q[qw][ijkg-planeg2];
double qwm3 = q[qw][ijkg-planeg3];
double qwm4 = q[qw][ijkg-planeg4];
flux[irho][ijk] = flux[irho][ijk]
- dxinv2*( ALP*( cons[ imz][ijkg+planeg ] - cons[ imz][ijkg-planeg ]) +
BET*( cons[ imz][ijkg+planeg2] - cons[ imz][ijkg-planeg2]) +
GAM*( cons[ imz][ijkg+planeg3] - cons[ imz][ijkg-planeg3]) +
DEL*( cons[ imz][ijkg+planeg4] - cons[ imz][ijkg-planeg4]) );
}
for(i=lo0;i<=hi0;i++){
int ijk = i + j*pencil + k*plane;
int ijkg = i + j*pencilg + k*planeg;
double qwp1 = q[qw][ijkg+planeg ];
double qwp2 = q[qw][ijkg+planeg2];
double qwp3 = q[qw][ijkg+planeg3];
double qwp4 = q[qw][ijkg+planeg4];
double qwm1 = q[qw][ijkg-planeg ];
double qwm2 = q[qw][ijkg-planeg2];
double qwm3 = q[qw][ijkg-planeg3];
double qwm4 = q[qw][ijkg-planeg4];
flux[imx][ijk] = flux[imx][ijk]
- dxinv2*( ALP*( cons[ imx][ijkg+planeg ]*qwp1 - cons[ imx][ijkg-planeg ]*qwm1 ) +
BET*( cons[ imx][ijkg+planeg2]*qwp2 - cons[ imx][ijkg-planeg2]*qwm2 ) +
GAM*( cons[ imx][ijkg+planeg3]*qwp3 - cons[ imx][ijkg-planeg3]*qwm3 ) +
DEL*( cons[ imx][ijkg+planeg4]*qwp4 - cons[ imx][ijkg-planeg4]*qwm4 ) );
}
for(i=lo0;i<=hi0;i++){
int ijk = i + j*pencil + k*plane;
int ijkg = i + j*pencilg + k*planeg;
double qwp1 = q[qw][ijkg+planeg ];
double qwp2 = q[qw][ijkg+planeg2];
double qwp3 = q[qw][ijkg+planeg3];
double qwp4 = q[qw][ijkg+planeg4];
double qwm1 = q[qw][ijkg-planeg ];
double qwm2 = q[qw][ijkg-planeg2];
double qwm3 = q[qw][ijkg-planeg3];
double qwm4 = q[qw][ijkg-planeg4];
flux[imy][ijk] = flux[imy][ijk]
- dxinv2*( ALP*( cons[ imy][ijkg+planeg ]*qwp1 - cons[ imy][ijkg-planeg ]*qwm1 ) +
BET*( cons[ imy][ijkg+planeg2]*qwp2 - cons[ imy][ijkg-planeg2]*qwm2 ) +
GAM*( cons[ imy][ijkg+planeg3]*qwp3 - cons[ imy][ijkg-planeg3]*qwm3 ) +
DEL*( cons[ imy][ijkg+planeg4]*qwp4 - cons[ imy][ijkg-planeg4]*qwm4 ) );
}
for(i=lo0;i<=hi0;i++){
int ijk = i + j*pencil + k*plane;
int ijkg = i + j*pencilg + k*planeg;
double qwp1 = q[qw][ijkg+planeg ];
double qwp2 = q[qw][ijkg+planeg2];
double qwp3 = q[qw][ijkg+planeg3];
double qwp4 = q[qw][ijkg+planeg4];
double qwm1 = q[qw][ijkg-planeg ];
double qwm2 = q[qw][ijkg-planeg2];
double qwm3 = q[qw][ijkg-planeg3];
double qwm4 = q[qw][ijkg-planeg4];
flux[imz][ijk] = flux[imz][ijk]
- dxinv2*( ALP*( cons[ imz][ijkg+planeg ]*qwp1 - cons[ imz][ijkg-planeg ]*qwm1 + q[qpres][ijkg+planeg ] - q[qpres][ijkg-planeg ] ) +
BET*( cons[ imz][ijkg+planeg2]*qwp2 - cons[ imz][ijkg-planeg2]*qwm2 + q[qpres][ijkg+planeg2] - q[qpres][ijkg-planeg2] ) +
GAM*( cons[ imz][ijkg+planeg3]*qwp3 - cons[ imz][ijkg-planeg3]*qwm3 + q[qpres][ijkg+planeg3] - q[qpres][ijkg-planeg3] ) +
DEL*( cons[ imz][ijkg+planeg4]*qwp4 - cons[ imz][ijkg-planeg4]*qwm4 + q[qpres][ijkg+planeg4] - q[qpres][ijkg-planeg4] ) );
}
for(i=lo0;i<=hi0;i++){
int ijk = i + j*pencil + k*plane;
int ijkg = i + j*pencilg + k*planeg;
double qwp1 = q[qw][ijkg+planeg ];
double qwp2 = q[qw][ijkg+planeg2];
double qwp3 = q[qw][ijkg+planeg3];
double qwp4 = q[qw][ijkg+planeg4];
double qwm1 = q[qw][ijkg-planeg ];
double qwm2 = q[qw][ijkg-planeg2];
double qwm3 = q[qw][ijkg-planeg3];
double qwm4 = q[qw][ijkg-planeg4];
flux[iene][ijk] = flux[iene][ijk]
- dxinv2*( ALP*( cons[iene][ijkg+planeg ]*qwp1 - cons[iene][ijkg-planeg ]*qwm1 + q[qpres][ijkg+planeg ]*qwp1 - q[qpres][ijkg-planeg ]*qwm1 ) +
BET*( cons[iene][ijkg+planeg2]*qwp2 - cons[iene][ijkg-planeg2]*qwm2 + q[qpres][ijkg+planeg2]*qwp2 - q[qpres][ijkg-planeg2]*qwm2 ) +
GAM*( cons[iene][ijkg+planeg3]*qwp3 - cons[iene][ijkg-planeg3]*qwm3 + q[qpres][ijkg+planeg3]*qwp3 - q[qpres][ijkg-planeg3]*qwm3 ) +
DEL*( cons[iene][ijkg+planeg4]*qwp4 - cons[iene][ijkg-planeg4]*qwm4 + q[qpres][ijkg+planeg4]*qwp4 - q[qpres][ijkg-planeg4]*qwm4 ) );
}
}}}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
uint64_t _time_L3 = CycleTime();
_total_time_hypterm_L3 += (_time_L3 - _time_L2);
_total_time_hypterm += (_time_L3 - _time_start);
printf("L1 iters = %d\n",L1iters );
printf("L2 iters = %d\n",L2iters );
printf("L3 iters = %d\n",L3iters );
#ifdef BL_NOBENCHMAIN
printf("-----------------\n");
printf(" L1 = %9.6f s\n",(double)(_time_L1 - _time_start)/frequency);
printf(" L2 = %9.6f s\n",(double)(_time_L2 - _time_L1)/frequency);
printf(" L3 = %9.6f s\n",(double)(_time_L3 - _time_L2)/frequency);
printf("hypterm = %9.6f s\n",(double)(_time_L3 - _time_start)/frequency);
#endif
// final min/max of every flux component over the interior, printed as a
// cheap correctness check
for(c = 0; c < 5; ++c) {
dmin[c] = flux[c][0];
dmax[c] = flux[c][0];
}
for(k=lo2;k<=hi2;k++){
for(j=lo1;j<=hi1;j++){
for(i=lo0;i<=hi0;i++){
int ijk = i + j*pencil + k*plane;
for(c = 0; c < 5; ++c) {
dmin[c] = fmin(dmin[c], flux[c][ijk]);
dmax[c] = fmax(dmax[c], flux[c][ijk]);
}
}
}
}
printf("-----------------\n");
for(c = 0; c < 5; ++c) {
printf("hypterm: minmax flux[%d] = %e %e\n", c, dmin[c], dmax[c]);
}
printf("-----------------\n");
}
#ifndef BL_NOBENCHMAIN
//------------------------------------------------------------------------------------------------------------------------------
// Benchmark driver: allocate the state (U), primitive (Q), and flux (F)
// multifabs for a single 64^3 box with 4 ghost cells, initialize them,
// run hypterm_naive once, and report timings.
int main(int argc, char **argv){
int MPI_Rank=0;
int MPI_Tasks=1;
int OMP_Threads = 1;
#pragma omp parallel
{
#pragma omp master
{
////////////////////// OMP_Threads = omp_get_num_threads();
}
}
int max_grid_size = 64;   // box is max_grid_size^3 interior cells
int NG = 4;               // ghost cells per side
int NC = 5;               // conserved components
int lo[3] = {0,0,0};
int hi[3] = {max_grid_size-1,max_grid_size-1,max_grid_size-1};
int fablo[3], fabhi[3];
double dx[3], probLo[3], probHi[3];
double * tmpbuf;
int volume,c;
// U: NC components with ghost cells; one aligned slab holds all components
double **U;
posix_memalign((void**)&U,64,NC*sizeof(double*));
volume = (max_grid_size+2*NG)*(max_grid_size+2*NG)*(max_grid_size+2*NG);
posix_memalign((void**)&tmpbuf,64,volume*(NC)*sizeof(double));memset(tmpbuf,0,volume*(NC)*sizeof(double));
for(c=0;c<NC;c++){
U[c] = tmpbuf + c*volume;
}
// F: NC components, no ghost cells.
// BUG FIX: F was malloc'd and then immediately overwritten by
// posix_memalign, leaking the malloc'd pointer array; allocate it once.
double **F;
posix_memalign((void**)&F,64,NC*sizeof(double*));
volume = max_grid_size*max_grid_size*max_grid_size;
posix_memalign((void**)&tmpbuf,64,volume*(NC)*sizeof(double));memset(tmpbuf,0,volume*(NC)*sizeof(double));
for(c=0;c<NC;c++){
F[c] = tmpbuf + c*volume;
}
// Q: NC+1 components (extra thermodynamic field) with ghost cells; use the
// same aligned allocation as U and F for consistency.
double **Q;
posix_memalign((void**)&Q,64,(NC+1)*sizeof(double*));
volume = (max_grid_size+2*NG)*(max_grid_size+2*NG)*(max_grid_size+2*NG);
posix_memalign((void**)&tmpbuf,64,volume*(NC+1)*sizeof(double));memset(tmpbuf,0,volume*(NC+1)*sizeof(double));
for(c=0;c<NC+1;c++){
Q[c] = tmpbuf + c*volume;
}
// problem domain [-2.3, 2.3]^3; fab bounds include the ghost region
for(c = 0; c < 3; ++c) {
probLo[c] = -2.3;
probHi[c] = 2.3;
dx[c] = (probHi[c] - probLo[c]) / ((double) (hi[c] - lo[c] + 1));
printf("dx = %f\n", dx[c]);
fablo[c] = lo[c] - NG;
fabhi[c] = hi[c] + NG;
}
init_data(lo, hi, fablo, fabhi, NG, dx, U, Q);
FakeWriteMultifab(lo, hi, fablo, fabhi, NG, NC, U, "mfUInit");
// calibrate the cycle counter and reset the accumulated timers
init_timer();
_total_run_time = 0;
_total_time_hypterm = 0;
_total_time_hypterm_L1 = 0;
_total_time_hypterm_L2 = 0;
_total_time_hypterm_L3 = 0;
int iteration,NIterations = 1;
uint64_t _run_time_start = CycleTime();
for(iteration=0;iteration<NIterations;iteration++){
// for all boxes...
hypterm_naive(lo,hi,NG,dx,U,Q,F);
}
uint64_t _run_time_end = CycleTime();
_total_run_time += (_run_time_end - _run_time_start);
FakeWriteMultifab(lo, hi, lo, hi, 0, NC, F, "mfFluxFinal");
#ifdef JBlockSize
printf("JBlockSize = %d\n",JBlockSize);
#endif
printf("-----------------\n");
printf(" L1 = %9.6f s\n",(double)_total_time_hypterm_L1/(double)NIterations/frequency);
printf(" L2 = %9.6f s\n",(double)_total_time_hypterm_L2/(double)NIterations/frequency);
printf(" L3 = %9.6f s\n",(double)_total_time_hypterm_L3/(double)NIterations/frequency);
printf("-----------------\n");
printf("hypterm = %9.6f s\n",(double)_total_time_hypterm /(double)NIterations/frequency);
printf("runtime = %9.6f s\n",(double)_total_run_time /frequency);
printf("-----------------\n");
}
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute result = x - y for two `struct timeval` values, exactly like the
 * classic GNU libc manual example. y is normalized IN PLACE (the caller's y
 * is modified) so that the final microsecond difference lies in [0, 1e6).
 * Returns 1 if the difference is negative (x earlier than y), else 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Borrow whole seconds into y's microseconds when x has fewer usecs,
 * so the usec subtraction below cannot go negative. */
if (x->tv_usec < y->tv_usec) {
int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_sec += borrow;
y->tv_usec -= 1000000 * borrow;
}
/* Carry any excess (more than one second of usecs) back into y's seconds. */
if (x->tv_usec - y->tv_usec > 1000000) {
int carry = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_sec -= carry;
y->tv_usec += 1000000 * carry;
}
/* With y normalized, the component-wise difference is the answer and
 * tv_usec is certainly non-negative. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Negative overall difference shows up purely in the seconds field. */
return x->tv_sec < y->tv_sec;
}
// Driver for the order-4 (radius-4) 3D 25-point wave stencil: allocates two
// time planes A[0]/A[1] and the coefficient field roc2, runs Nt sweeps
// TESTS times, and reports the best time via PRINT_RESULTS.
//
// Usage: prog Nx Ny Nz [Nt]  (interior sizes; 8 ghost layers are added)
int main(int argc, char *argv[])
{
int t, i, j, k, test;
// BUG FIX: Nx/Ny/Nz/Nt were read uninitialized when too few command-line
// arguments were given; give them sane defaults (64^3 interior, 10 steps).
int Nx = 64+8, Ny = 64+8, Nz = 64+8, Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
// BUG FIX: roc2 was first malloc'd with a bogus size and then immediately
// re-malloc'd, leaking the first allocation; allocate it exactly once.
double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 8;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
// BUG FIX: the loops previously started at index 1, leaving plane/row/
// column 0 uninitialized, and A[1] was never initialized at all — yet the
// stencil reads both (A[i-4..] from i=4, and A[(t+1)%2] on the first
// step). Initialize everything from index 0, with A[1] zeroed.
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0;
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
// 8th-order centered second-derivative coefficients
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
// leapfrog update: next = 2*curr - prev + roc2 * laplacian(curr)
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
free(A);           // fix: the top-level pointer array was never freed
free(tile_size);   // fix: tile_size was never freed
return 0;
}
|
train_proc.h | #ifndef _TRAIN_PROC_H
#define _TRAIN_PROC_H
#include "cnn/nodes.h"
#include "cnn/cnn.h"
#include "cnn/dnn.h"
#include "cnn/training.h"
#include "cnn/timing.h"
#include "cnn/rnn.h"
#include "cnn/lstm.h"
#include "cnn/dglstm.h"
#include "cnn/dict.h"
#include "cnn/model.h"
#include "cnn/expr.h"
#include "cnn/cnn-helper.h"
#include "ext/dialogue/attention_with_intention.h"
#include "ext/lda/lda.h"
#include "ext/ngram/ngram.h"
#include "cnn/data-util.h"
#include "cnn/grad-check.h"
#include "cnn/metric-util.h"
#include "ext/trainer/eval_proc.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <unordered_map>
#include <algorithm>
#include <iterator>
#include <boost/algorithm/string.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_int.hpp>
#include <boost/random/uniform_01.hpp>
#include <boost/random/variate_generator.hpp>
#include <boost/range/adaptor/reversed.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_wiarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_woarchive.hpp>
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/archive/codecvt_null.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/program_options/variables_map.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/program_options/variables_map.hpp>
using namespace std;
using namespace cnn;
using namespace boost::program_options;
//using namespace boost;
extern unsigned LAYERS;
extern unsigned HIDDEN_DIM; // 1024
extern unsigned ALIGN_DIM; // 1024
extern unsigned VOCAB_SIZE_SRC;
extern unsigned VOCAB_SIZE_TGT;
extern long nparallel;
extern long mbsize;
extern size_t g_train_on_turns;
extern cnn::real weight_IDF;
extern cnn::Dict sd;
extern cnn::Dict td;
extern cnn::stId2String<string> id2str;
extern int kSRC_SOS;
extern int kSRC_EOS;
extern int kTGT_SOS;
extern int kTGT_EOS;
extern int verbose;
extern int beam_search_decode;
extern cnn::real lambda; // = 1e-6;
extern int repnumber;
extern int rerankIDF;
extern int reinforceIDF;
extern cnn::real weight_IDF;
extern cnn::real weight_edist;
extern Sentence prv_response;
extern NumTurn2DialogId training_numturn2did;
extern NumTurn2DialogId devel_numturn2did;
extern NumTurn2DialogId test_numturn2did;
void reset_smoothed_ppl(vector<cnn::real>& ppl_hist);
cnn::real smoothed_ppl(cnn::real curPPL, vector<cnn::real>& ppl_hist);
#define MAX_NBR_TRUNS 200000
struct TrainingScores{
public:
long swords; /// source side number of words
long twords; /// target side number of words
int training_score_current_location;
int training_score_buf_size;
cnn::real *training_scores;
cnn::real dloss;
public:
TrainingScores(int bufsize) : training_score_buf_size(bufsize) {
training_scores = (cnn::real*) cnn_mm_malloc(training_score_buf_size * sizeof(cnn::real), CNN_ALIGN);
training_score_current_location = 0;
swords = 0;
twords = 0;
}
~TrainingScores()
{
cnn_mm_free(training_scores);
}
void reset()
{
swords = 0;
twords = 0;
training_score_current_location = 0;
}
cnn::real compute_score()
{
if (training_score_current_location > MAX_NBR_TRUNS - 1)
std::runtime_error("TrainingScore out of memory");
dloss = 0;
vector<cnn::real> raw_score = as_vector(training_score_current_location, training_scores);
for (auto& p : raw_score)
dloss += p;
return dloss;
}
};
/**
The higher level training process
*/
template <class Proc>
class TrainProcess{
private:
    TrainingScores* training_set_scores;  /// loss accumulator used during training
    TrainingScores* dev_set_scores;       /// loss accumulator used for dev/test evaluation
    TFIDFMetric * ptr_tfidfScore;         /// optional tf-idf scorer (fix: stray second semicolon removed)

public:
    TrainProcess() {
        training_set_scores = new TrainingScores(MAX_NBR_TRUNS);
        dev_set_scores = new TrainingScores(MAX_NBR_TRUNS);
        ptr_tfidfScore = nullptr;
    }

    // NOTE(review): this class owns three raw pointers but has no copy
    // control; copying a TrainProcess would double-delete them. Instances
    // appear to be used singly — confirm before adding copy support.
    ~TrainProcess()
    {
        delete training_set_scores;
        delete dev_set_scores;
        if (ptr_tfidfScore)
            delete ptr_tfidfScore;
    }

    void prt_model_info(size_t LAYERS, size_t VOCAB_SIZE_SRC, const vector<unsigned>& dims, size_t nreplicate, size_t decoder_additiona_input_to, size_t mem_slots, cnn::real scale);

    void batch_train(Model &model, Proc &am, Corpus &training, Corpus &devel,
        Trainer &sgd, string out_file, int max_epochs, int nparallel, cnn::real& largest_cost, bool do_segmental_training, bool update_sgd,
        bool doGradientCheck, bool b_inside_logic,
        bool do_padding, int kEOS, /// do padding. if so, use kEOS as the padding symbol
        bool b_use_additional_feature
        );

    void supervised_pretrain(Model &model, Proc &am, Corpus &training, Corpus &devel,
        Trainer &sgd, string out_file, cnn::real target_ppl, int min_diag_id,
        bool bcharlevel, bool nosplitdialogue);

    void batch_train_ranking(Model &model, Proc &am, size_t max_epochs, Corpus &train_corpus, string model_out_fn,
        string out_file, Dict & td, NumTurn2DialogId& train_corpusinfo, Trainer *sgd, int nparallel, int max_negative_samples);

    /// adaptation using a small adaptation set
    void online_adaptation(Model &model, Proc &am,
        const Dialogue & training, // user_input_target_response_pair,
        Trainer &sgd,
        const cnn::real& target_ppl, /// the target training ppl
        int maxepoch, /// the maximum number of epochs
        const string & updated_model_fname);

    void train(Model &model, Proc &am, Corpus &training, Corpus &devel,
        Trainer &sgd, string out_file, int max_epochs,
        bool bcharlevel, bool nosplitdialogue);
    void train(Model &model, Proc &am, TupleCorpus &training, Trainer &sgd, string out_file, int max_epochs);
    void split_data_batch_train(string train_filename, Model &model, Proc &am, Corpus &devel, Trainer &sgd, string out_file, int max_epochs, int nparallel, int epochsize, bool do_segmental_training, bool do_gradient_check, bool do_padding, bool b_use_additional_feature);

    void REINFORCEtrain(Model &model, Proc &am, Proc &am_agent_mirrow, Corpus &training, Corpus &devel, Trainer &sgd, string out_file, Dict & td, int max_epochs, int nparallel, cnn::real& largest_cost, cnn::real reward_baseline = 0.0, cnn::real threshold_prob_for_sampling = 1.0);
    void REINFORCE_batch_train(Model &model, Proc &am, Proc &am_agent_mirrow,
        Corpus &training, Corpus &devel,
        Trainer &sgd, Dict& td, string out_file, int max_epochs, int nparallel, cnn::real &best, bool segmental_training,
        bool sgd_update_epochs, bool do_gradient_check, bool b_inside_logic,
        cnn::real reward_baseline,
        cnn::real threshold_prob_for_sampling);
    void split_data_batch_reinforce_train(string train_filename, Model &model,
        Proc &hred, Proc & hred_agent_mirrow,
        Corpus &devel,
        Trainer &sgd, Dict& td,
        string out_file,
        string model_file_name,
        int max_epochs, int nparallel, int epochsize,
        cnn::real & largest_cost, cnn::real reward_baseline, cnn::real threshold_prob,
        bool do_gradient_check);

    /** report perplexity
    @param words_s the word count in the source side
    @param words_t the word count in the target side
    @return entropy loss
    */
    cnn::real testPPL(Model &model, Proc &am, Corpus &devel, NumTurn2DialogId& info, string out_file, bool segmental_training, cnn::real& words_s, cnn::real& words_t);
    void test(Model &model, Proc &am, Corpus &devel, string out_file, Dict & td, NumTurn2DialogId& test_corpusinfo, bool segmental_training, const string& score_embedding_fn = "");
    void test_with_additional_feature(Model &model, Proc &am, Corpus &devel, string out_file, Dict & sd);
    void test(Model &model, Proc &am, Corpus &devel, string out_file, Dict & sd);
    void test_segmental(Model &model, Proc &am, Corpus &devel, string out_file, Dict & sd);
    void test(Model &model, Proc &am, TupleCorpus &devel, string out_file, Dict & sd, Dict & td);
    void testRanking(Model &, Proc &, Corpus &, Corpus &, string, Dict &, NumTurn2DialogId&, bool use_tfidf, int max_negative_samples);

    void MMI_test(Proc &am, Proc& anti_am, Corpus &devel, string out_file, Dict & sd);

    void sample(Model &model, Proc &am, Corpus &devel, string out_file, Dict & sd);

    void dialogue(Model &model, Proc &am, string out_file, Dict & td);

    void collect_sample_responses(Proc& am, Corpus &training);

    void nosegmental_forward_backward(Model &model, Proc &am, PDialogue &v_v_dialogues, int nutt,
        TrainingScores* scores, bool resetmodel = false, int init_turn_id = 0, Trainer* sgd = nullptr);
    void segmental_forward_backward(Model &model, Proc &am, PDialogue &v_v_dialogues, int nutt, TrainingScores *scores, bool resetmodel, bool doGradientCheck = false, Trainer* sgd = nullptr);
    pair<cnn::real, cnn::real> segmental_forward_backward_ranking(Model &model, Proc &am, PDialogue &v_v_dialogues, CandidateSentencesList &csls, int nutt, TrainingScores * scores, bool resetmodel, bool doGradientCheck, Trainer* sgd);
    void segmental_forward_backward_with_additional_feature(Model &model, Proc &am, PDialogue &v_v_dialogues, int nutt, TrainingScores * scores, bool resetmodel, bool doGradientCheck, Trainer* sgd);
    void REINFORCE_nosegmental_forward_backward(Model &model, Proc &am, Proc &am_mirrow, PDialogue &v_v_dialogues, int nutt,
        cnn::real &dloss, cnn::real & dchars_s, cnn::real & dchars_t, Trainer* sgd, Dict& sd, cnn::real reward_baseline = 0.0, cnn::real threshold_prob_for_sampling = 1.0,
        bool update_model = true);
    void REINFORCE_segmental_forward_backward(Proc &am, Proc &am_mirrow, PDialogue &v_v_dialogues, int nutt, Trainer* sgd, Dict& sd, cnn::real reward_baseline, cnn::real threshold_prob_for_sampling, TrainingScores *scores, bool update_model);

public:
    /// for reranking
    bool MERT_tune(Model &model, Proc &am, Corpus &devel, string out_file, Dict & sd);
    bool MERT_tune_edit_distance(Model &model, Proc &am, Corpus &devel, string out_file, Dict & sd, cnn::real weight_IDF=0.1);
    void MERT_MMI_tune(Proc &am, Proc& anti_am, Corpus &devel, string out_file, Dict & sd);

public:
    /// for test-time ranking of candidate responses
    /// @return a pair of numbers for top_1 and top_5 hits
    pair<unsigned, unsigned> segmental_forward_ranking(Model &model, Proc &am, PDialogue &v_v_dialogues, CandidateSentencesList &, int nutt, TrainingScores *scores, bool resetmodel, bool doGradientCheck = false, Trainer* sgd = nullptr);
    pair<unsigned, unsigned> segmental_forward_ranking_using_tfidf(Model &model, Proc &am, PDialogue &v_v_dialogues, CandidateSentencesList &, int nutt, TrainingScores *scores, bool resetmodel, bool doGradientCheck = false, Trainer* sgd = nullptr);

public:
    /// for LDA
    void lda_train(variables_map vm, const Corpus &training, const Corpus &test, Dict& sd);
    void lda_test(variables_map vm, const Corpus& test, Dict& sd);

public:
    /// for ngram
    nGram ngram_train(variables_map vm, const Corpus& test, Dict& sd);
    void ngram_clustering(variables_map vm, const Corpus& test, Dict& sd);
    void ngram_one_pass_clustering(variables_map vm, const Corpus& test, Dict& sd);
    void representative_presentation(
        vector<nGram> pnGram,
        const Sentences& responses,
        Dict& sd,
        vector<int>& i_data_to_cls,
        vector<string>& i_representative, cnn::real interpolation_wgt);
    void hierarchical_ngram_clustering(variables_map vm, const CorpusWithClassId& test, Dict& sd);
    int closest_class_id(vector<nGram>& pnGram, int this_cls, int nclsInEachCluster, const Sentence& obs, cnn::real& score, cnn::real interpolation_wgt);
    void ngram_sampling(int sos_sym, int eos_sym, variables_map vm, nGram& pnGram, Dict& sd);

public:
    /// compute tfidf weight for all words from training data
    /// dictionary or word list is given
    /// TF : Term Frequency, which measures how frequently a term occurs in a document.Since every document is different in length, it is possible that a term would appear much more times in long documents than shorter ones.Thus, the term frequency is often divided by the document length(aka.the total number of terms in the document) as a way of normalization :
    /// TF(t, d) = (Number of times term t appears in a document d) / (Total number of terms in the document).
    /// IDF : Inverse Document Frequency, which measures how important a term is.While computing TF, all terms are considered equally important.However it is known that certain terms, such as "is", "of", and "that", may appear a lot of times but have little importance.Thus we need to weigh down the frequent terms while scale up the rare ones, by computing the following :
    /// IDF(t) = log_e(Total number of documents / Number of documents with term t in it).
    /// compute idf from training corpus,
    /// exact tfidf score of a term needs to be computed given a sentence
    void get_idf(variables_map vm, const Corpus &training, Dict& sd);

protected:
    mutable vector<cnn::real> mv_idf; /// the dictionary for saving tfidf
    /// the index in this vector corresponds to index in the dictionary sd

public:
    vector<cnn::real> ppl_hist; /// history of smoothed perplexities across epochs
};
/**
 This is a fake experiment, as the user side is known and is assumed to respond correctly to the agent side.
*/
template <class AM_t>
void TrainProcess<AM_t>::test(Model &model, AM_t &am, Corpus &devel, string out_file, Dict & td, NumTurn2DialogId& test_corpusinfo,
    bool segmental_training, const string& score_embedding_fn)
{
    unsigned lines = 0; /// number of (dialogue x turn) lines evaluated
    ofstream of(out_file);

    Timer iteration("completed in");

    /// report BLEU score
    //test(model, am, devel, out_file + "bleu", sd);

    dev_set_scores->reset();

    {
        vector<bool> vd_selected(devel.size(), false); /// track if a dialogue has been consumed
        size_t id_stt_diag_id = 0;
        PDialogue vd_dialogues; // dialogues are organized per turn; each turn holds parallel data from all speakers
        vector<int> id_sel_idx = get_same_length_dialogues(devel, NBR_DEV_PARALLEL_UTTS, id_stt_diag_id, vd_selected, vd_dialogues, test_corpusinfo);
        size_t ndutt = id_sel_idx.size();
        // bug fix: lines was never updated, so the summary below always
        // reported 0 lines; accumulate as testRanking does.
        lines += ndutt * vd_dialogues.size();

        if (verbose)
        {
            cerr << "selected " << ndutt << " : ";
            for (auto p : id_sel_idx)
                cerr << p << " ";
            cerr << endl;
        }

        while (ndutt > 0)
        {
            // forward pass only (no trainer passed) to accumulate dev loss
            if (segmental_training)
                segmental_forward_backward(model, am, vd_dialogues, ndutt, dev_set_scores, false);
            else
                nosegmental_forward_backward(model, am, vd_dialogues, ndutt, dev_set_scores, true);

            id_sel_idx = get_same_length_dialogues(devel, NBR_DEV_PARALLEL_UTTS, id_stt_diag_id, vd_selected, vd_dialogues, test_corpusinfo);
            ndutt = id_sel_idx.size();
            lines += ndutt * vd_dialogues.size(); // see bug-fix note above

            if (verbose)
            {
                cerr << "selected " << ndutt << " : ";
                for (auto p : id_sel_idx)
                    cerr << p << " ";
                cerr << endl;
            }
        }
    }

    dev_set_scores->compute_score();
    cerr << "\n***Test [lines =" << lines << " out of total " << devel.size() << " lines ] E = " << (dev_set_scores->dloss / dev_set_scores->twords) << " ppl=" << exp(dev_set_scores->dloss / dev_set_scores->twords) << ' ';
    of << "\n***Test [lines =" << lines << " out of total " << devel.size() << " lines ] E = " << (dev_set_scores->dloss / dev_set_scores->twords) << " ppl=" << exp(dev_set_scores->dloss / dev_set_scores->twords) << ' ';

    /// if requested, also report a score in embedding space
    if (score_embedding_fn.size() > 0)
    {
        EvaluateProcess<AM_t> * ptr_evaluate = new EvaluateProcess<AM_t>();
        ptr_evaluate->readEmbedding(score_embedding_fn, td);

        cnn::real emb_loss = 0;
        cnn::real emb_chars_s = 0;
        cnn::real emb_chars_t = 0;
        cnn::real turns = 0;
        for (auto & diag : devel)
        {
            turns += ptr_evaluate->scoreInEmbeddingSpace(am, diag, td, emb_loss, emb_chars_s, emb_chars_t);
        }
        cerr << "\n***Test [lines =" << lines << " out of total " << devel.size() << " lines ] word embedding loss = " << (emb_loss / turns) << " ppl=" << exp(emb_loss / turns) << ' ';
        of << "\n***Test [lines =" << lines << " out of total " << devel.size() << " lines ] word embedding loss = " << (emb_loss / turns) << " ppl=" << exp(emb_loss / turns) << endl;

        delete ptr_evaluate;
    }

    of.close();
}
/**
Test recall value
*/
/// Evaluate response ranking (recall@1 / recall@5) on the devel corpus.
/// Negative candidates are drawn from train_corpus responses; per-turn-length
/// accuracy is also reported, to both stderr and out_file.
template <class AM_t>
void TrainProcess<AM_t>::testRanking(Model &model, AM_t &am, Corpus &devel, Corpus &train_corpus, string out_file, Dict & td, NumTurn2DialogId& test_corpusinfo,
bool use_tfidf, int max_negative_samples)
{
unsigned lines = 0;
unsigned hits_top_1 = 0;
unsigned hits_top_5 = 0;
// key: dialogue length in turns; value: (top-1 hits, top-5 hits, total lines)
map<int, tuple<int, int, int>> acc_over_turn;
ofstream of(out_file);
Timer iteration("completed in");
dev_set_scores->reset();
/// get all responses from training set, these responses will be used as negative samples
Sentences negative_responses = get_all_responses(train_corpus);
vector<bool> vd_selected(devel.size(), false); /// track if a dialgoue is used
size_t id_stt_diag_id = 0;
PDialogue vd_dialogues; // dialogues are orgnaized in each turn, in each turn, there are parallel data from all speakers
vector<int> id_sel_idx = get_same_length_dialogues(devel, NBR_DEV_PARALLEL_UTTS, id_stt_diag_id, vd_selected, vd_dialogues, test_corpusinfo);
size_t ndutt = id_sel_idx.size();
lines += ndutt * vd_dialogues.size();
long rand_pos = 0;
CandidateSentencesList csls = get_candidate_responses(vd_dialogues, negative_responses, rand_pos, max_negative_samples);
while (ndutt > 0)
{
pair<unsigned, unsigned> this_hit;
// score the candidates either with the model or with a tf-idf baseline
if (use_tfidf)
this_hit = segmental_forward_ranking_using_tfidf(model, am, vd_dialogues, csls, ndutt, dev_set_scores, false);
else
this_hit = segmental_forward_ranking(model, am, vd_dialogues, csls, ndutt, dev_set_scores, false);
hits_top_1 += this_hit.first;
hits_top_5 += this_hit.second;
// accumulate per-turn-length statistics
if (acc_over_turn.find(vd_dialogues.size()) == acc_over_turn.end())
{
acc_over_turn[vd_dialogues.size()] = make_tuple(0, 0, 0);
}
get<0>(acc_over_turn[vd_dialogues.size()]) += this_hit.first;
get<1>(acc_over_turn[vd_dialogues.size()]) += this_hit.second;
get<2>(acc_over_turn[vd_dialogues.size()]) += ndutt * vd_dialogues.size();
// fetch the next batch of same-length dialogues and its candidate lists
id_sel_idx = get_same_length_dialogues(devel, NBR_DEV_PARALLEL_UTTS, id_stt_diag_id, vd_selected, vd_dialogues, test_corpusinfo);
ndutt = id_sel_idx.size();
lines += ndutt * vd_dialogues.size();
csls = get_candidate_responses(vd_dialogues, negative_responses, rand_pos, max_negative_samples);
if (verbose)
{
cerr << "selected " << ndutt << " : ";
for (auto p : id_sel_idx)
cerr << p << " ";
cerr << endl;
}
}
// report recall broken down by dialogue length, then overall
for (auto iter = acc_over_turn.begin(); iter != acc_over_turn.end(); iter++)
{
auto key = iter->first;
auto t = iter->second;
cerr << "turn len :" << key << ", " << get<2>(t) << "lines, R@1 " << get<0>(t) / (get<2>(t) +0.0) * 100 << "%., R@5 " << get<1>(t) / (get<2>(t) +0.0) * 100<< "%." << endl;
of << "turn len :" << key << ", " << get<2>(t) << "lines, R@1 " << get<0>(t) / (get<2>(t) +0.0) * 100 << "%., R@5 " << get<1>(t) / (get<2>(t) +0.0) * 100<< "%." << endl;
}
cerr << "\n***Test [lines =" << lines << " out of total " << devel.size() << " lines ] 1 in" << (MAX_NUMBER_OF_CANDIDATES + 1) << " R@1 " << hits_top_1 / (lines + 0.0) *100.0 << "%." << " R@5 " << hits_top_5 / (lines + 0.0) *100.0 << "%." << ' ';
of << "\n***Test [lines =" << lines << " out of total " << devel.size() << " lines ] 1 in" << (MAX_NUMBER_OF_CANDIDATES + 1) << " R@1 " << hits_top_1 / (lines + 0.0) *100.0 << "%." << " R@5 " << hits_top_5 / (lines + 0.0) *100.0 << "%." << ' ';
of.close();
}
/**
Test perplexity on the corpus
*/
/**
 Compute perplexity-related loss on a corpus.
 @param ddchars_s [out] word count on the source side
 @param ddchars_t [out] word count on the target side
 @return total entropy loss (LZERO when the corpus is empty)
*/
template <class AM_t>
cnn::real TrainProcess<AM_t>::testPPL(Model &model, AM_t &am, Corpus &devel, NumTurn2DialogId& test_corpusinfo, string out_file, bool segmental_training, cnn::real & ddchars_s, cnn::real& ddchars_t)
{
    unsigned lines = 0;

    unsigned si = devel.size(); /// number of dialogues in the corpus
    if (si == 0)
        return LZERO;

    // NOTE(review): the stream is opened in append mode but nothing is
    // written to it before close — presumably a leftover; confirm intent.
    ofstream of(out_file, ios::app);

    Timer iteration("completed in");

    dev_set_scores->reset();

    vector<bool> vd_selected(devel.size(), false); /// track if a dialogue has been consumed
    size_t id_stt_diag_id = 0;
    PDialogue vd_dialogues; // dialogues are organized per turn; each turn holds parallel data from all speakers
    vector<int> id_sel_idx = get_same_length_dialogues(devel, NBR_DEV_PARALLEL_UTTS, id_stt_diag_id, vd_selected, vd_dialogues, test_corpusinfo);
    size_t ndutt = id_sel_idx.size();
    // bug fix: lines was never updated, so the summary always printed 0
    lines += ndutt * vd_dialogues.size();

    while (ndutt > 0)
    {
        if (segmental_training)
            segmental_forward_backward(model, am, vd_dialogues, ndutt, dev_set_scores, false);
        else
            nosegmental_forward_backward(model, am, vd_dialogues, ndutt, dev_set_scores, true);

        id_sel_idx = get_same_length_dialogues(devel, NBR_DEV_PARALLEL_UTTS, id_stt_diag_id, vd_selected, vd_dialogues, test_corpusinfo);
        ndutt = id_sel_idx.size();
        lines += ndutt * vd_dialogues.size();
    }

    dev_set_scores->compute_score();
    // bug fix: the documented output parameters were never assigned;
    // report the accumulated source/target word counts to the caller.
    ddchars_s = dev_set_scores->swords;
    ddchars_t = dev_set_scores->twords;

    cerr << "\n***Test [lines =" << lines << " out of total " << devel.size() << " lines ] E = " << (dev_set_scores->dloss / dev_set_scores->twords) << " ppl=" << exp(dev_set_scores->dloss / dev_set_scores->twords) << ' ';

    of.close();
    return dev_set_scores->dloss;
}
/** Warning: this test function uses the true past response as the context when measuring the BLEU score.
 - So the BLEU score is artificially high.
 - However, because the user input is conditioned on the past response, if the true decoder response were used as the past context, the user input could not come from the corpus.
 - Therefore, it is reasonable to use the true past response as context when evaluating the model.
*/
/// Decode every turn of every dialogue in devel (greedy or beam search),
/// optionally rerank the beam by IDF and edit-distance scores, and report
/// BLEU, IDF, and inter-response edit-distance metrics to stdout and out_file.
template <class AM_t>
void TrainProcess<AM_t>::test(Model &model, AM_t &am, Corpus &devel, string out_file, Dict & sd)
{
BleuMetric bleuScore;
bleuScore.Initialize();
/*cnn::real idf_weight = 0.1;
cnn::real edist_weight = 0.1;*/
IDFMetric idfScore(mv_idf);
EditDistanceMetric editDistScoreHyp;
EditDistanceMetric editDistScoreRef;
ofstream of(out_file);
Timer iteration("completed in");
for (auto diag : devel){
SentencePair prv_turn;
size_t turn_id = 0;
/// train on two segments of a dialogue
vector<int> res;
vector<vector<int>> res_kbest;
vector<string> prv_response;
vector<string> prv_response_ref;
am.reset();
for (auto spair : diag)
{
ComputationGraph cg;
SentencePair turn = spair;
vector<string> sref, srec;
priority_queue<Hypothesis, vector<Hypothesis>, CompareHypothesis> beam_search_results;
// first turn has no previous response to condition on
if (turn_id == 0)
{
// beam_search_decode == -1 means greedy decoding
if (beam_search_decode == -1)
res = am.decode(turn.first, cg, sd);
else
res = am.beam_decode(turn.first, cg, beam_search_decode, sd);
}
else
{
if (beam_search_decode == -1)
res = am.decode(prv_turn.second, turn.first, cg, sd);
else
res = am.beam_decode(prv_turn.second, turn.first, cg, beam_search_decode, sd);
}
if (turn.first.size() > 0)
{
cout << "source: ";
for (auto p : turn.first){
cout << sd.Convert(p) << " ";
}
cout << endl;
}
if (turn.second.size() > 0)
{
cout << "ref response: ";
for (auto p : turn.second){
cout << sd.Convert(p) << " ";
sref.push_back(sd.Convert(p));
}
cout << endl;
}
if (rerankIDF > 0)
{
beam_search_results = am.get_beam_decode_complete_list();
/// averaged_log_likelihood , idf_score, bleu_score
/// the goal is to rerank using averaged_log_likelihood + weight * idf_score
/// so that the top is with the highest bleu score
vector<int> best_res;
cnn::real largest_score = -10000.0;
while (!beam_search_results.empty())
{
vector<int> result = beam_search_results.top().target;
cnn::real lk = beam_search_results.top().cost;
cnn::real idf_score = idfScore.GetStats(turn.second, result).second;
srec.clear();
for (auto p : result){
srec.push_back(sd.Convert(p));
}
cnn::real edist_score = editDistScoreHyp.GetStats(prv_response, srec);
// blend model likelihood with IDF, then with edit distance
cnn::real rerank_score = (1 - weight_IDF) * lk + weight_IDF * idf_score;
rerank_score = (1 - weight_edist) * rerank_score + weight_edist * edist_score;
if (rerank_score > largest_score)
{
largest_score = rerank_score;
best_res = result;
}
beam_search_results.pop();
}
if (best_res.size() > 0)
{
srec.clear();
cout << "res response: ";
for (auto p : best_res){
cout << sd.Convert(p) << " ";
srec.push_back(sd.Convert(p));
}
cout << endl;
}
idfScore.AccumulateScore(turn.second, best_res);
}
else
{
if (res.size() > 0)
{
cout << "res response: ";
for (auto p : res){
cout << sd.Convert(p) << " ";
srec.push_back(sd.Convert(p));
}
cout << endl;
}
idfScore.AccumulateScore(turn.second, res);
}
bleuScore.AccumulateScore(sref, srec);
// edit distance is only meaningful from the second turn onward
if (turn_id > 0){
editDistScoreHyp.AccumulateScore(prv_response, srec);
editDistScoreRef.AccumulateScore(prv_response_ref, sref);
}
turn_id++;
prv_turn = turn;
prv_response = srec;
prv_response_ref = sref;
}
}
string sBleuScore = bleuScore.GetScore();
cout << "BLEU (4) score = " << sBleuScore << endl;
of << "BLEU (4) score = " << sBleuScore << endl;
pair<cnn::real, cnn::real> idf_score = idfScore.GetScore();
cout << "reference IDF = " << idf_score.first << " ; hypothesis IDF = " << idf_score.second << endl;
of << "reference IDF = " << idf_score.first << " ; hypothesis IDF = " << idf_score.second << endl;
cnn::real edit_distance_score_ref = editDistScoreRef.GetScore();
cnn::real edit_distance_score_hyp = editDistScoreHyp.GetScore();
cout << "average edit distance between two responses : reference: " << edit_distance_score_ref << " hypothesis: " << edit_distance_score_hyp << endl;
of << "average edit distance between two responses : reference: " << edit_distance_score_ref << " hypothesis: " << edit_distance_score_hyp << endl;
of.close();
}
/**
sample to generate data
*/
/// Generate responses by sampling (instead of decoding) for every turn of
/// every dialogue in devel, and report BLEU, IDF, and edit-distance metrics
/// to stdout and out_file.
template <class AM_t>
void TrainProcess<AM_t>::sample(Model &model, AM_t &am, Corpus &devel, string out_file, Dict & sd)
{
BleuMetric bleuScore;
bleuScore.Initialize();
/*cnn::real idf_weight = 0.1;
cnn::real edist_weight = 0.1;*/
IDFMetric idfScore(mv_idf);
EditDistanceMetric editDistScoreHyp;
EditDistanceMetric editDistScoreRef;
ofstream of(out_file);
Timer iteration("completed in");
for (auto diag : devel){
SentencePair prv_turn;
size_t turn_id = 0;
/// train on two segments of a dialogue
vector<int> res;
vector<vector<int>> res_kbest;
vector<string> prv_response;
vector<string> prv_response_ref;
am.reset();
for (auto spair : diag)
{
ComputationGraph cg;
SentencePair turn = spair;
vector<string> sref, srec;
priority_queue<Hypothesis, vector<Hypothesis>, CompareHypothesis> beam_search_results;
// first turn samples without a previous response as context
if (turn_id == 0)
{
res = am.sample(vector<int>(), turn.first, cg, sd);
}
else
{
res = am.sample(prv_turn.second, turn.first, cg, sd);
}
if (turn.first.size() > 0)
{
cout << "source: ";
for (auto p : turn.first){
cout << sd.Convert(p) << " ";
}
cout << endl;
}
if (turn.second.size() > 0)
{
cout << "ref response: ";
for (auto p : turn.second){
cout << sd.Convert(p) << " ";
sref.push_back(sd.Convert(p));
}
cout << endl;
}
if (res.size() > 0)
{
cout << "res response: ";
for (auto p : res){
cout << sd.Convert(p) << " ";
srec.push_back(sd.Convert(p));
}
cout << endl;
}
idfScore.AccumulateScore(turn.second, res);
bleuScore.AccumulateScore(sref, srec);
// edit distance is only meaningful from the second turn onward
if (turn_id > 0){
editDistScoreHyp.AccumulateScore(prv_response, srec);
editDistScoreRef.AccumulateScore(prv_response_ref, sref);
}
turn_id++;
prv_turn = turn;
prv_response = srec;
prv_response_ref = sref;
}
}
string sBleuScore = bleuScore.GetScore();
cout << "BLEU (4) score = " << sBleuScore << endl;
of << "BLEU (4) score = " << sBleuScore << endl;
pair<cnn::real, cnn::real> idf_score = idfScore.GetScore();
cout << "reference IDF = " << idf_score.first << " ; hypothesis IDF = " << idf_score.second << endl;
of << "reference IDF = " << idf_score.first << " ; hypothesis IDF = " << idf_score.second << endl;
cnn::real edit_distance_score_ref = editDistScoreRef.GetScore();
cnn::real edit_distance_score_hyp = editDistScoreHyp.GetScore();
cout << "average edit distance between two responses : reference: " << edit_distance_score_ref << " hypothesis: " << edit_distance_score_hyp << endl;
of << "average edit distance between two responses : reference: " << edit_distance_score_ref << " hypothesis: " << edit_distance_score_hyp << endl;
of.close();
}
/// Variant of test() that feeds an additional tf-idf context feature
/// (computed by ptr_tfidfScore over the accumulated dialogue history) into
/// decoding, then reports BLEU, IDF, and edit-distance metrics.
template <class AM_t>
void TrainProcess<AM_t>::test_with_additional_feature(Model &model, AM_t &am, Corpus &devel, string out_file, Dict & sd)
{
BleuMetric bleuScore;
bleuScore.Initialize();
/*cnn::real idf_weight = 0.1;
cnn::real edist_weight = 0.1;*/
IDFMetric idfScore(mv_idf);
EditDistanceMetric editDistScoreHyp;
EditDistanceMetric editDistScoreRef;
ofstream of(out_file);
Timer iteration("completed in");
for (auto diag : devel){
SentencePair prv_turn;
SentencePair prv_turn_tfidf; // running concatenation of the dialogue history for the tf-idf feature
size_t turn_id = 0;
/// train on two segments of a dialogue
vector<int> res;
vector<vector<int>> res_kbest;
vector<string> prv_response;
vector<string> prv_response_ref;
am.reset();
for (auto spair : diag)
{
ComputationGraph cg;
SentencePair turn = spair;
vector<string> sref, srec;
priority_queue<Hypothesis, vector<Hypothesis>, CompareHypothesis> beam_search_results;
/// assign context
if (turn_id == 0)
prv_turn_tfidf = turn;
else{
prv_turn_tfidf.first.insert(prv_turn_tfidf.first.end(), turn.first.begin(), turn.first.end());
}
vector<cnn::real> reftfidf = ptr_tfidfScore->GetStats(prv_turn_tfidf.first);
if (turn_id == 0)
{
// NOTE(review): prv_turn_tfidf was already set to `turn` above for
// turn_id == 0; this repeated assignment looks redundant — confirm.
prv_turn_tfidf = turn;
// beam_search_decode == -1 means greedy decoding
if (beam_search_decode == -1)
res = am.decode_with_additional_feature(turn.first, reftfidf,cg, sd);
else
res = am.beam_decode_with_additional_feature(turn.first, reftfidf, cg, beam_search_decode, sd);
}
else
{
if (beam_search_decode == -1)
res = am.decode_with_additional_feature(prv_turn.second, turn.first, reftfidf, cg, sd);
else
res = am.beam_decode_with_additional_feature(prv_turn.second, turn.first, reftfidf, cg, beam_search_decode, sd);
}
if (turn.first.size() > 0)
{
cout << "source: ";
for (auto p : turn.first){
cout << sd.Convert(p) << " ";
}
cout << endl;
}
if (turn.second.size() > 0)
{
cout << "ref response: ";
for (auto p : turn.second){
cout << sd.Convert(p) << " ";
sref.push_back(sd.Convert(p));
}
cout << endl;
}
if (rerankIDF > 0)
{
beam_search_results = am.get_beam_decode_complete_list();
/// averaged_log_likelihood , idf_score, bleu_score
/// the goal is to rerank using averaged_log_likelihood + weight * idf_score
/// so that the top is with the highest bleu score
vector<int> best_res;
cnn::real largest_score = -10000.0;
while (!beam_search_results.empty())
{
vector<int> result = beam_search_results.top().target;
cnn::real lk = beam_search_results.top().cost;
cnn::real idf_score = idfScore.GetStats(turn.second, result).second;
srec.clear();
for (auto p : result){
srec.push_back(sd.Convert(p));
}
cnn::real edist_score = editDistScoreHyp.GetStats(prv_response, srec);
// blend model likelihood with IDF, then with edit distance
cnn::real rerank_score = (1 - weight_IDF) * lk + weight_IDF * idf_score;
rerank_score = (1 - weight_edist) * rerank_score + weight_edist * edist_score;
if (rerank_score > largest_score)
{
largest_score = rerank_score;
best_res = result;
}
beam_search_results.pop();
}
if (best_res.size() > 0)
{
srec.clear();
cout << "res response: ";
for (auto p : best_res){
cout << sd.Convert(p) << " ";
srec.push_back(sd.Convert(p));
}
cout << endl;
}
idfScore.AccumulateScore(turn.second, best_res);
}
else
{
if (res.size() > 0)
{
cout << "res response: ";
for (auto p : res){
cout << sd.Convert(p) << " ";
srec.push_back(sd.Convert(p));
}
cout << endl;
}
idfScore.AccumulateScore(turn.second, res);
}
bleuScore.AccumulateScore(sref, srec);
// edit distance is only meaningful from the second turn onward
if (turn_id > 0){
editDistScoreHyp.AccumulateScore(prv_response, srec);
editDistScoreRef.AccumulateScore(prv_response_ref, sref);
}
turn_id++;
prv_turn = turn;
// append the reference response to the history used for tf-idf context
prv_turn_tfidf.first.insert(prv_turn_tfidf.first.end(), turn.second.begin(), turn.second.end());
prv_response = srec;
prv_response_ref = sref;
}
}
string sBleuScore = bleuScore.GetScore();
cout << "BLEU (4) score = " << sBleuScore << endl;
of << "BLEU (4) score = " << sBleuScore << endl;
pair<cnn::real, cnn::real> idf_score = idfScore.GetScore();
cout << "reference IDF = " << idf_score.first << " ; hypothesis IDF = " << idf_score.second << endl;
of << "reference IDF = " << idf_score.first << " ; hypothesis IDF = " << idf_score.second << endl;
cnn::real edit_distance_score_ref = editDistScoreRef.GetScore();
cnn::real edit_distance_score_hyp = editDistScoreHyp.GetScore();
cout << "average edit distance between two responses : reference: " << edit_distance_score_ref << " hypothesis: " << edit_distance_score_hyp << endl;
of << "average edit distance between two responses : reference: " << edit_distance_score_ref << " hypothesis: " << edit_distance_score_hyp << endl;
of.close();
}
/**
use MMI method for testing
*/
/// MMI (Maximum Mutual Information) decoding test.
/// Each turn is beam-decoded with the forward model `am`; every complete
/// hypothesis is then re-scored with the reverse model `anti_am`
/// (response -> source direction), and the hypothesis maximizing
///   (1 - weight_IDF) * forward_loglik + weight_IDF * reverse_score
/// is kept. Corpus-level BLEU(4) of the kept hypotheses is written to
/// stdout and to `out_file`.
/// @param am       forward generation model (must support beam decoding)
/// @param anti_am  reverse model used for MMI re-scoring
/// @param devel    evaluation corpus
/// @param out_file report file path
/// @param sd       vocabulary mapping word ids to strings
template <class AM_t>
void TrainProcess<AM_t>::MMI_test(AM_t &am, AM_t& anti_am,
Corpus &devel, string out_file, Dict & sd)
{
BleuMetric bleuScore;
bleuScore.Initialize();
/*cnn::real idf_weight = 0.1;
cnn::real edist_weight = 0.1;*/
// NOTE(review): idfScore and both edit-distance metrics are constructed but
// never used in this function; res_kbest, prv_response, prv_response_ref and
// anit_model_correct_response_state below are likewise dead here.
IDFMetric idfScore(mv_idf);
EditDistanceMetric editDistScoreHyp;
EditDistanceMetric editDistScoreRef;
ofstream of(out_file);
Timer iteration("completed in");
for (auto diag : devel){
SentencePair prv_turn;
size_t turn_id = 0;
/// train on two segments of a dialogue
vector<int> res;
vector<vector<int>> res_kbest;
vector<string> prv_response;
vector<string> prv_response_ref;
vector<vector<vector<cnn::real>>> anit_model_correct_response_state;
am.reset();
for (auto spair : diag)
{
SentencePair turn = spair;
vector<string> sref, srec;
priority_queue<Hypothesis, vector<Hypothesis>, CompareHypothesis> beam_search_results;
// inner scope: the forward decoding graph is destroyed before the
// per-hypothesis re-scoring graphs are built
{
ComputationGraph cg;
if (turn_id == 0)
{
res = am.beam_decode(turn.first, cg, beam_search_decode, sd);
}
else
{
// condition decoding on the previous *reference* response
res = am.beam_decode(prv_turn.second, turn.first, cg, beam_search_decode, sd);
}
if (turn.first.size() > 0)
{
cout << "source: ";
for (auto p : turn.first){
cout << sd.Convert(p) << " ";
}
cout << endl;
}
if (turn.second.size() > 0)
{
cout << "ref response: ";
for (auto p : turn.second){
cout << sd.Convert(p) << " ";
sref.push_back(sd.Convert(p));
}
cout << endl;
}
beam_search_results = am.get_beam_decode_complete_list();
}
vector<int> best_res;
cnn::real largest_score = -10000.0;
// re-score every complete beam hypothesis with the reverse model and
// keep the one with the best interpolated score
while (!beam_search_results.empty())
{
ComputationGraph cg;
anti_am.reset();
vector<int> result = beam_search_results.top().target;
cnn::real lk = beam_search_results.top().cost;
/// reverse direction
Dialogue backward_res, backward_src;
SentencePair sp_res, sp_src;
sp_src.first = result;
sp_src.second = turn.first;
backward_src.push_back(sp_src);
anti_am.build_graph(backward_res, backward_src, cg);
// length-normalized reverse score (negated cross entropy per target word)
cnn::real anti_score = -as_scalar(cg.get_value(anti_am.s2txent.i)) / (0.0 + result.size());
// NOTE(review): weight_IDF is reused here as the MMI interpolation weight
cnn::real rerank_score = (1 - weight_IDF) * lk + weight_IDF * anti_score;
if (rerank_score > largest_score)
{
largest_score = rerank_score;
best_res = result;
}
beam_search_results.pop();
}
if (best_res.size() > 0)
{
srec.clear();
cout << "res response: ";
for (auto p : best_res){
cout << sd.Convert(p) << " ";
srec.push_back(sd.Convert(p));
}
cout << endl;
}
else
{
cout << "error: no outputs " << endl;
}
bleuScore.AccumulateScore(sref, srec);
turn_id++;
prv_turn = turn;
prv_response = srec;
prv_response_ref = sref;
}
}
string sBleuScore = bleuScore.GetScore();
cout << "BLEU (4) score = " << sBleuScore << endl;
of << "BLEU (4) score = " << sBleuScore << endl;
of.close();
}
/**
use MERT to tune weights for MMI decoding
*/
/// Collect MERT statistics for MMI decoding and tune the interpolation weight.
/// For every turn, beam-decode with the forward model `am` and record one
/// triple per complete hypothesis:
///   (forward log-likelihood, reverse-model score, sentence BLEU).
/// A grid search (grid_search) is then run over the collected lists and the
/// best-performing weight is written to `out_file`.
/// @param am       forward generation model
/// @param anti_am  reverse model providing the MMI score
/// @param devel    development corpus
/// @param out_file file receiving the selected weight
/// @param sd       vocabulary mapping word ids to strings
template <class AM_t>
void TrainProcess<AM_t>::MERT_MMI_tune(AM_t &am, AM_t& anti_am,
Corpus &devel, string out_file, Dict & sd)
{
BleuMetric bleuScore;
bleuScore.Initialize();
/*cnn::real idf_weight = 0.1;
cnn::real edist_weight = 0.1;*/
// NOTE(review): idfScore and the edit-distance metrics are unused here.
IDFMetric idfScore(mv_idf);
EditDistanceMetric editDistScoreHyp;
EditDistanceMetric editDistScoreRef;
ofstream of(out_file);
vector<vector<tuple<cnn::real, cnn::real, cnn::real>>> dev_set_rerank_scores;
Timer iteration("completed in");
for (auto diag : devel){
SentencePair prv_turn;
size_t turn_id = 0;
/// train on two segments of a dialogue
vector<int> res;
vector<vector<int>> res_kbest;
vector<string> prv_response;
vector<string> prv_response_ref;
vector<vector<vector<cnn::real>>> anit_model_correct_response_state;
am.reset();
for (auto spair : diag)
{
SentencePair turn = spair;
vector<string> sref, srec;
priority_queue<Hypothesis, vector<Hypothesis>, CompareHypothesis> beam_search_results;
// inner scope: decoding graph is released before scoring graphs are built
{
ComputationGraph cg;
if (turn_id == 0)
{
res = am.beam_decode(turn.first, cg, beam_search_decode, sd);
}
else
{
res = am.beam_decode(prv_turn.second, turn.first, cg, beam_search_decode, sd);
}
if (turn.second.size() > 0)
{
for (auto p : turn.second){
sref.push_back(sd.Convert(p));
}
}
beam_search_results = am.get_beam_decode_complete_list();
}
vector<int> best_res;
cnn::real largest_score = -10000.0;
vector<tuple<cnn::real, cnn::real, cnn::real>> rerank_scores;
while (!beam_search_results.empty())
{
ComputationGraph cg;
anti_am.reset();
vector<int> result = beam_search_results.top().target;
cnn::real lk = beam_search_results.top().cost;
/// reverse direction
Dialogue backward_res, backward_src;
SentencePair sp_res, sp_src;
sp_src.first = result;
sp_src.second = turn.first;
backward_src.push_back(sp_src);
anti_am.build_graph(backward_res, backward_src, cg);
// length-normalized reverse score for this hypothesis
cnn::real anti_score = -as_scalar(cg.get_value(anti_am.s2txent.i)) / (0.0 + result.size());
srec.clear();
for (auto p : result){
srec.push_back(sd.Convert(p));
}
cnn::real bleu_score = bleuScore.GetSentenceScore(sref, srec);
rerank_scores.push_back(make_tuple(lk, anti_score, bleu_score));
beam_search_results.pop();
}
dev_set_rerank_scores.push_back(rerank_scores);
turn_id++;
prv_turn = turn;
// NOTE(review): srec here holds the LAST popped hypothesis, not the best
// one; prv_response is not consumed in this function, so it is harmless.
prv_response = srec;
prv_response_ref = sref;
}
}
/// learn a weight to IDF score
cnn::real optimal_wgt = grid_search(dev_set_rerank_scores);
of << "optimal weight to IDF score is " << optimal_wgt << endl;
of.close();
}
/**
using beam search, generated candidate lists
each list has a tuple of scores
averaged_log_likelihood , idf_score, bleu_score
the goal of tuning is to rerank using averaged_log_likelihood + weight * idf_score
so that the top is with the highest bleu score
after tuning, the weight is computed and returned
@return weights
*/
/// Tune the IDF interpolation weight with MERT-style grid search.
/// For every turn, the beam-search n-best list is recorded as triples
/// (log-likelihood, idf score, sentence BLEU); grid_search then selects the
/// weight w maximizing average BLEU of the candidate chosen by
///   (1 - w) * log-likelihood + w * idf.
/// @param model    (unused directly; kept for interface symmetry)
/// @param am       generation model used for beam decoding
/// @param devel    development corpus
/// @param out_file file receiving the optimal weight
/// @param sd       vocabulary mapping word ids to strings
/// @return false if beam search decoding or rerankIDF is not configured,
///         true otherwise
template <class AM_t>
bool TrainProcess<AM_t>::MERT_tune(Model &model, AM_t &am, Corpus &devel, string out_file, Dict & sd)
{
BleuMetric bleuScore;
bleuScore.Initialize();
IDFMetric idfScore(mv_idf);
// NOTE(review): editDistScoreRef is unused in this function.
EditDistanceMetric editDistScoreRef;
if (beam_search_decode <= 0)
{
cerr << "need beam search decoding to generate candidate lists. please set beamsearchdecode" << endl;
return false;
}
if (rerankIDF != 1)
{
cerr << "need to set rerankIDF to 1" << endl;
return false;
}
// `of` is closed implicitly by its destructor on return
ofstream of(out_file);
Timer iteration("completed in");
map<cnn::real, cnn::real> weight_to_bleu_pair;
// one entry per turn; each inner tuple is (lk, idf, sentence BLEU)
vector<vector<tuple<cnn::real, cnn::real, cnn::real>>> dev_set_rerank_scores;
int samples = 0;
cout << "started decoding " << endl;
for (auto diag : devel){
SentencePair prv_turn;
size_t turn_id = 0;
/// train on two segments of a dialogue
vector<int> res;
vector<vector<int>> res_kbest;
am.reset();
for (auto spair : diag)
{
ComputationGraph cg;
SentencePair turn = spair;
vector<string> sref, srec;
priority_queue<Hypothesis, vector<Hypothesis>, CompareHypothesis> beam_search_results;
if (turn_id == 0)
res = am.beam_decode(turn.first, cg, beam_search_decode, sd);
else
res = am.beam_decode(prv_turn.second, turn.first, cg, beam_search_decode, sd);
sref.clear();
if (turn.second.size() > 0)
{
for (auto p : turn.second){
sref.push_back(sd.Convert(p));
}
}
if (rerankIDF > 0)
{
beam_search_results = am.get_beam_decode_complete_list();
if (beam_search_results.empty())
cerr << "beam search complete list is empty " << endl;
/// averaged_log_likelihood , idf_score, bleu_score
/// the goal is to rerank using averaged_log_likelihood + weight * idf_score
/// so that the top is with the highest bleu score
vector<tuple<cnn::real, cnn::real, cnn::real>> rerank_scores;
while (!beam_search_results.empty())
{
vector<int> result = beam_search_results.top().target;
cnn::real lk = beam_search_results.top().cost;
// .second is the hypothesis-side IDF statistic
cnn::real idf_score = idfScore.GetStats(turn.second, result).second;
srec.clear();
for (auto p : result){
srec.push_back(sd.Convert(p));
}
cnn::real bleu_score = bleuScore.GetSentenceScore(sref, srec);
beam_search_results.pop();
rerank_scores.push_back(make_tuple(lk, idf_score, bleu_score));
}
dev_set_rerank_scores.push_back(rerank_scores);
}
turn_id++;
prv_turn = turn;
}
samples++;
cout << " " << samples;
if (samples % 100 == 0)
cout << "finished " << samples / (devel.size() + 0.0) * 100 << "%" << endl;
}
cout << "completed decoding" << endl;
/// learn a weight to IDF score
cnn::real optimal_wgt = grid_search(dev_set_rerank_scores);
of << "optimal weight to IDF score is " << optimal_wgt << endl;
return true;
}
/**
Tune the edit-distance interpolation weight with a MERT-style grid search,
keeping the IDF weight fixed at `weight_IDF`.

For every turn, the beam-search n-best list is recorded as tuples
(log-likelihood, idf score, sentence BLEU, edit distance to the previous
response). A grid of edit-distance weights in [0, 0.2] (step 0.005) is then
scanned; for each weight w the candidate maximizing
    (1 - w) * ((1 - weight_IDF) * lk + weight_IDF * idf) + w * edist
is selected per turn, and the weight yielding the highest average sentence
BLEU is reported to `out_file` and stdout.

@param model      (unused directly; kept for interface symmetry)
@param am         generation model used for (beam) decoding
@param devel      development corpus
@param out_file   file receiving the optimal weight
@param sd         vocabulary mapping word ids to strings
@param weight_IDF fixed interpolation weight for the IDF score
@return false if beam search decoding is unavailable or no candidate lists
        were collected, true otherwise
*/
template <class AM_t>
bool TrainProcess<AM_t>::MERT_tune_edit_distance(Model &model, AM_t &am, Corpus &devel, string out_file, Dict & sd, cnn::real weight_IDF)
{
    BleuMetric bleuScore;
    bleuScore.Initialize();
    IDFMetric idfScore(mv_idf);
    EditDistanceMetric editDistScoreRef;
    if (beam_search_decode <= 0)
    {
        cerr << "need beam search decoding to generate candidate lists. please set beamsearchdecode" << endl;
        return false;
    }
    ofstream of(out_file);
    Timer iteration("completed in");
    map<cnn::real, cnn::real> weight_to_bleu_pair;
    /// one entry per turn; each tuple is (lk, idf, sentence BLEU, edit distance)
    vector<vector<tuple<cnn::real, cnn::real, cnn::real, cnn::real>>> dev_set_rerank_scores;
    int samples = 0;
    cout << "started decoding " << endl;
    for (auto diag : devel){
        Timer beam_decode("beam decode completed in");
        SentencePair prv_turn;
        size_t turn_id = 0;
        vector<int> res;
        vector<vector<int>> res_kbest;
        vector<string> prv_response;
        for (auto spair : diag)
        {
            ComputationGraph cg;
            SentencePair turn = spair;
            vector<string> sref, srec;
            priority_queue<Hypothesis, vector<Hypothesis>, CompareHypothesis> beam_search_results;
            if (turn_id == 0)
            {
                if (beam_search_decode == -1)
                    res = am.decode(turn.first, cg, sd);
                else
                    res = am.beam_decode(turn.first, cg, beam_search_decode, sd);
            }
            else
            {
                if (beam_search_decode == -1)
                    res = am.decode(prv_turn.second, turn.first, cg, sd);
                else
                    res = am.beam_decode(prv_turn.second, turn.first, cg, beam_search_decode, sd);
            }
            sref.clear();
            if (turn.second.size() > 0)
            {
                for (auto p : turn.second){
                    sref.push_back(sd.Convert(p));
                }
            }
            if (rerankIDF > 0)
            {
                beam_search_results = am.get_beam_decode_complete_list();
                /// record scores for each complete hypothesis of this turn
                vector<tuple<cnn::real, cnn::real, cnn::real, cnn::real>> rerank_scores;
                while (!beam_search_results.empty())
                {
                    vector<int> result = beam_search_results.top().target;
                    cnn::real lk = beam_search_results.top().cost;
                    cnn::real idf_score = idfScore.GetStats(turn.second, result).second;
                    srec.clear();
                    for (auto p : result){
                        srec.push_back(sd.Convert(p));
                    }
                    cnn::real bleu_score = bleuScore.GetSentenceScore(sref, srec);
                    cnn::real edit_distance_score = editDistScoreRef.GetSentenceScore(prv_response, srec);
                    beam_search_results.pop();
                    rerank_scores.push_back(make_tuple(lk, idf_score, bleu_score, edit_distance_score));
                }
                dev_set_rerank_scores.push_back(rerank_scores);
            }
            turn_id++;
            prv_turn = turn;
            prv_response = srec;
        }
        samples++;
        cout << " " << samples;
        if (samples % 100 == 0)
            cout << "finished " << samples / (devel.size() + 0.0) * 100 << "%" << endl;
    }
    cout << "completed decoding" << endl;
    // guard: without candidate lists the averages below would divide by zero
    // and the argmax index would stay -1
    if (dev_set_rerank_scores.empty())
    {
        cerr << "no candidate lists collected; cannot tune edit distance weight" << endl;
        return false;
    }
    /// grid search over the edit-distance weight
    vector<cnn::real> v_bleu_scores;
    vector<cnn::real> v_wgts;
    cnn::real idf_wgt = weight_IDF;
    Timer MERT_tune("MERT tune completed in");
    for (cnn::real edst_wgt = 0.0; edst_wgt <= 0.2; edst_wgt += 0.005)
    {
        // BUG FIX: record the weight actually being searched (edst_wgt);
        // previously the constant idf_wgt was pushed, so the reported optimum
        // was always weight_IDF regardless of the search outcome.
        v_wgts.push_back(edst_wgt);
        cnn::real avg_bleu_score = 0;
        for (auto t : dev_set_rerank_scores)
        {
            cnn::real max_combine_score = -10000.0;
            int idx = -1;
            int k = 0;
            for (auto c : t)
            {
                cnn::real lk = std::get<0>(c);
                cnn::real idfscore = std::get<1>(c);
                cnn::real this_score = (1.0 - idf_wgt) * lk + idf_wgt * idfscore;
                cnn::real edistscore = std::get<3>(c);
                this_score = (1 - edst_wgt) * this_score + edst_wgt * edistscore;
                if (max_combine_score < this_score)
                {
                    max_combine_score = this_score;
                    idx = k;
                }
                k++;
            }
            if (idx >= 0) // guard against empty candidate lists
                avg_bleu_score += std::get<2>(t[idx]);
        }
        v_bleu_scores.push_back(avg_bleu_score / dev_set_rerank_scores.size());
    }
    cnn::real max_bleu_score = -10000.0;
    int idx_wgt = -1;
    cout << "bleu : ";
    for (size_t k = 0; k < v_bleu_scores.size(); k++)
    {
        if (max_bleu_score < v_bleu_scores[k])
        {
            max_bleu_score = v_bleu_scores[k];
            idx_wgt = k;
        }
        cout << v_bleu_scores[k] << " ";
    }
    cout << endl;
    cout << "weights : ";
    for (auto w : v_wgts)
        cout << w << " ";
    cout << endl;
    cnn::real optimal_wgt = v_wgts[idx_wgt];
    of << "optimal weight to edit distance score is " << optimal_wgt << endl;
    cout << "optimal weight to edit distance score is " << optimal_wgt << endl;
    return true;
}
/** Warning: this test function uses the true past response as the context when measuring the BLEU score,
so the BLEU score is artificially high.
However, the user input is conditioned on the past response: if the decoder's own response were used as the past context, the user input could no longer come from the corpus.
Therefore, it is reasonable to use the true past response as the context when evaluating the model.
*/
/**
Evaluate the model turn by turn and report corpus-level BLEU(4).
The true past response (teacher forcing) is used as decoding context; see
the warning comment above this function. Each turn's source, reference and
decoded response are echoed to stdout.

Cleanup: removed eight locals that were never read (lines, dloss,
dchars_s, dchars_t, si, ddloss, ddchars_s, ddchars_t).

@param model    (unused directly; kept for interface symmetry)
@param am       decoder model (greedy when beam_search_decode == -1,
                beam search otherwise)
@param devel    evaluation corpus
@param out_file file receiving the BLEU score
@param sd       vocabulary mapping word ids to strings
*/
template <class AM_t>
void TrainProcess<AM_t>::test_segmental(Model &model, AM_t &am, Corpus &devel, string out_file, Dict & sd)
{
    BleuMetric bleuScore;
    bleuScore.Initialize();
    ofstream of(out_file);
    Timer iteration("completed in");
    for (auto diag : devel){
        SentencePair prv_turn;
        size_t turn_id = 0;
        vector<int> res;
        for (auto spair : diag)
        {
            ComputationGraph cg;
            SentencePair turn = spair;
            vector<string> sref, srec;
            if (turn_id == 0)
            {
                if (beam_search_decode == -1)
                    res = am.decode(turn.first, cg, sd);
                else
                    res = am.beam_decode(turn.first, cg, beam_search_decode, sd);
            }
            else
            {
                // condition on the previous *reference* response
                if (beam_search_decode == -1)
                    res = am.decode(prv_turn.second, turn.first, cg, sd);
                else
                    res = am.beam_decode(prv_turn.second, turn.first, cg, beam_search_decode, sd);
            }
            if (turn.first.size() > 0)
            {
                cout << "source: ";
                for (auto p : turn.first){
                    cout << sd.Convert(p) << " ";
                }
                cout << endl;
            }
            if (turn.second.size() > 0)
            {
                cout << "ref response: ";
                for (auto p : turn.second){
                    cout << sd.Convert(p) << " ";
                    sref.push_back(sd.Convert(p));
                }
                cout << endl;
            }
            if (res.size() > 0)
            {
                cout << "res response: ";
                for (auto p : res){
                    cout << sd.Convert(p) << " ";
                    srec.push_back(sd.Convert(p));
                }
                cout << endl;
            }
            bleuScore.AccumulateScore(sref, srec);
            turn_id++;
            prv_turn = turn;
        }
    }
    string sBleuScore = bleuScore.GetScore();
    cout << "BLEU (4) score = " << sBleuScore << endl;
    of << sBleuScore << endl;
    of.close();
}
/**
Test on the tuple corpus
output recognition results for each test
not using perplexity to report progresses
*/
/**
Decode the tuple corpus and print recognition results to stdout.
For every turn: echo the source words (sd), the tuple's last field (sd),
and the decoded output (td). No perplexity is computed.

Cleanup: removed eight locals that were never read (lines, dloss,
dchars_s, dchars_t, si, ddloss, ddchars_s, ddchars_t).

@param model    (unused directly; kept for interface symmetry)
@param am       model providing decode_tuple()
@param devel    tuple corpus to decode
@param out_file output file (created/truncated; nothing is written to it here)
@param sd       source-side vocabulary
@param td       target-side vocabulary
*/
template <class AM_t>
void TrainProcess<AM_t>::test(Model &model, AM_t &am, TupleCorpus &devel, string out_file, Dict & sd, Dict & td)
{
    ofstream of(out_file); // kept for its side effect of creating the file
    Timer iteration("completed in");
    for (auto diag : devel){
        SentenceTuple prv_turn;
        size_t turn_id = 0;
        /// one computation graph is reused across all turns of this dialogue
        ComputationGraph cg;
        vector<int> res;
        for (auto spair : diag){
            SentenceTuple turn = spair;
            if (turn_id == 0)
                res = am.decode_tuple(turn, cg, sd, td);
            else
                res = am.decode_tuple(prv_turn, turn, cg, sd, td);
            if (turn.first.size() > 0)
            {
                for (auto p : turn.first){
                    cout << sd.Convert(p) << " ";
                }
                cout << endl;
            }
            if (turn.last.size() > 0)
            {
                for (auto p : turn.last){
                    cout << sd.Convert(p) << " ";
                }
                cout << endl;
            }
            if (res.size() > 0)
            {
                for (auto p : res){
                    cout << td.Convert(p) << " ";
                }
                cout << endl;
            }
            turn_id++;
            prv_turn = turn;
        }
    }
    of.close();
}
/// Interactive console dialogue with the trained agent.
/// Reads user turns from stdin until the user types "exit"; decodes a
/// response (greedy when beam_search_decode == -1, beam search otherwise),
/// optionally re-ranks the complete beam hypotheses with IDF and (from the
/// second turn on) edit distance to the previous agent response, logs each
/// exchange to `out_file`, and feeds the chosen response back as context.
/// @param model    (unused directly; kept for interface symmetry)
/// @param am       decoder model
/// @param out_file transcript output path
/// @param td       vocabulary for both user input and agent output
template <class AM_t>
void TrainProcess<AM_t>::dialogue(Model &model, AM_t &am, string out_file, Dict & td)
{
    string shuman;
    ofstream of(out_file);
    IDFMetric idfScore(mv_idf);
    EditDistanceMetric editDistScoreHyp;
    int d_idx = 0; // dialogue index
    while (1){
        cout << "please start dialogue with the agent. you can end this dialogue by typing exit " << endl;
        size_t t_idx = 0; // turn index within this dialogue
        vector<int> decode_output;
        vector<int> shuman_input;
        Sentence prv_response;
        vector<string> prv_response_str;
        ComputationGraph cg; // one graph per dialogue, shared across turns
        while (1){
#ifdef INPUT_UTF8
            std::getline(wcin, shuman);
            if (shuman.find(L"exit") == 0)
                break;
#else
            std::getline(cin, shuman);
            if (shuman.find("exit") == 0)
                break;
#endif
            shuman = "<s> " + shuman + " </s>";
            convertHumanQuery(shuman, shuman_input, td);
            vector<string> sref, srec;
            priority_queue<Hypothesis, vector<Hypothesis>, CompareHypothesis> beam_search_results;
            if (t_idx == 0)
            {
                if (beam_search_decode == -1)
                    decode_output = am.decode(shuman_input, cg, td);
                else
                    decode_output = am.beam_decode(shuman_input, cg, beam_search_decode, td);
            }
            else
            {
                if (beam_search_decode == -1)
                    decode_output = am.decode(prv_response, shuman_input, cg, td);
                else
                    decode_output = am.beam_decode(prv_response, shuman_input, cg, beam_search_decode, td);
            }
            if (rerankIDF > 0)
            {
                /// re-rank complete beam hypotheses by a convex combination of
                /// likelihood, IDF and edit distance to the previous response
                beam_search_results = am.get_beam_decode_complete_list();
                cnn::real largest_score = -10000.0;
                while (!beam_search_results.empty())
                {
                    vector<int> result = beam_search_results.top().target;
                    cnn::real lk = beam_search_results.top().cost;
                    // NOTE(review): the hypothesis is compared against itself;
                    // no reference exists in live dialogue, so only the
                    // hypothesis-side statistic (.second) is meaningful here.
                    cnn::real idf_score = idfScore.GetStats(result, result).second;
                    srec.clear();
                    for (auto p : result){
                        // BUG FIX: use td (this function's only Dict) instead
                        // of `sd`, which is not a parameter here; every other
                        // id->string conversion in this function uses td.
                        srec.push_back(td.Convert(p));
                    }
                    cnn::real edit_distance_score = 0;
                    if (t_idx > 0)
                        edit_distance_score = editDistScoreHyp.GetSentenceScore(prv_response_str, srec);
                    beam_search_results.pop();
                    cnn::real score_combine_idf_lk = weight_IDF * idf_score + (1 - weight_IDF) * lk;
                    cnn::real comb_score = (1 - weight_edist) * score_combine_idf_lk + weight_edist * edit_distance_score;
                    if (comb_score > largest_score)
                    {
                        largest_score = comb_score;
                        decode_output = result;
                    }
                }
            }
            of << "res ||| " << d_idx << " ||| " << t_idx << " ||| ";
            for (auto pp : shuman_input)
            {
                of << td.Convert(pp) << " ";
            }
            of << " ||| ";
            for (auto pp : decode_output)
            {
                of << td.Convert(pp) << " ";
            }
            of << endl;
            cout << "Agent: ";
            prv_response_str.clear();
            for (auto pp : decode_output)
            {
                cout << td.Convert(pp) << " ";
                prv_response_str.push_back(td.Convert(pp));
            }
            cout << endl;
            prv_response = decode_output;
            t_idx++;
        }
        d_idx++;
        of << endl;
    }
    of.close();
}
/**
inspired by the following two papers
Sequence level training with recurrent neural networks http://arxiv.org/pdf/1511.06732v3.pdf
Minimum risk training for neural machine translation http://arxiv.org/abs/1512.02433
use decoded responses as targets. start this process from the last turn, and then gradually move to earlier turns.
this is also for implementation convenience.
/// initially always use the xent; later on, with probability p, use the decoded response as target, but weight it
/// with a reward from BLEU
/// this probability is increased from 0 to 1.0.
/// to avoid different scaling, should apply decoding to all incoming sentences, or otherwise all use xent training
/// with probability p, decode an input
vector<int> response = s2tmodel_sim.decode(insent, cg);
/// evaluate the response to get BLEU score
/// subtract the BLEU score with a baseline number
/// the scalar is the reward signal
/// the target responses: some utterances are with true responses and the others are with decoded responses
*/
/// REINFORCE-style sequence-level training over a whole batched dialogue
/// inside ONE computation graph: the per-turn errors are summed and a single
/// backward pass / parameter update is performed at the end.
/// With probability (1 - threshold_prob_for_sampling) the targets are
/// responses sampled (batch-decoded) from the frozen mirror model
/// `am_mirrow`, and each utterance's loss is weighted by
/// (reward - reward_baseline), where the reward is sentence BLEU against the
/// reference (or, when reinforceIDF > 0, the hypothesis IDF score).
/// Otherwise plain cross entropy on the reference responses is used.
/// @param am        model being trained
/// @param am_mirrow frozen model used only to sample responses
/// @param v_v_dialogues batched dialogue; each PTurn holds nutt utterances
/// @param dloss, dchars_s, dchars_t accumulated loss / source / target counts
/// @param sgd       optimizer; applied only when non-null and update_model
/// @param reward_baseline subtracted from the reward to reduce variance
/// @param threshold_prob_for_sampling sampling is used iff a uniform draw
///        is >= this value (i.e. higher threshold => less sampling)
template <class AM_t>
void TrainProcess<AM_t>::REINFORCE_nosegmental_forward_backward(Model &model, AM_t &am, AM_t &am_mirrow, PDialogue &v_v_dialogues, int nutt,
cnn::real &dloss, cnn::real & dchars_s, cnn::real & dchars_t, Trainer* sgd, Dict& sd, cnn::real reward_baseline, cnn::real threshold_prob_for_sampling, bool update_model)
{
size_t turn_id = 0;
size_t i_turns = 0;
PTurn prv_turn, new_turn, new_prv_turn;
BleuMetric bleuScore;
bleuScore.Initialize();
IDFMetric idfScore(mv_idf);
// one coin flip decides sampling for the WHOLE dialogue, not per turn
bool do_sampling = false;
cnn::real rng_value = rand() / (RAND_MAX + 0.0);
if (rng_value >= threshold_prob_for_sampling)
{
do_sampling = true;
}
ComputationGraph cg;
am.reset();
am_mirrow.reset();
/// train on two segments of a dialogue
vector<Sentence> res;
vector<Expression> v_errs; /// the errors to be minimized
vector<cnn::real> v_bleu_score;
vector<Expression> i_err;
for (auto &turn : v_v_dialogues)
{
if (do_sampling)
{
vector<Sentence> v_input, v_prv_response;
v_bleu_score.clear();
for (auto& p : turn)
{
v_input.push_back(p.first);
}
for (auto&p : prv_turn)
{
v_prv_response.push_back(p.second);
}
// sample responses from the frozen mirror model
if (turn_id == 0)
{
res = am_mirrow.batch_decode(v_input, cg, sd);
}
else
{
res = am_mirrow.batch_decode(v_prv_response, v_input, cg, sd);
}
// compute one reward per utterance (BLEU, or IDF when reinforceIDF > 0);
// assumes res.size() == nutt so v_bleu_score lines up with i_err below
size_t k = 0;
for (auto &q : res)
{
if (reinforceIDF <= 0)
{
vector<string> sref, srec;
if (verbose) cout << "ref response: ";
for (auto p : turn[k].second){
if (verbose) cout << sd.Convert(p) << " ";
sref.push_back(sd.Convert(p));
}
if (verbose) cout << endl;
srec.clear();
if (verbose) cout << "res response: ";
for (auto p : q){
if (verbose) cout << sd.Convert(p) << " ";
srec.push_back(sd.Convert(p));
}
if (verbose) cout << endl;
cnn::real score;
score = bleuScore.GetSentenceScore(sref, srec);
v_bleu_score.push_back(score);
}
else
{
vector<int> sref, srec;
if (verbose) cout << "ref response: ";
for (auto p : turn[k].second){
if (verbose) cout << sd.Convert(p) << " ";
sref.push_back(p);
}
if (verbose) cout << endl;
srec.clear();
if (verbose) cout << "res response: ";
for (auto p : q){
if (verbose) cout << sd.Convert(p) << " ";
srec.push_back(p);
}
if (verbose) cout << endl;
cnn::real score;
score = idfScore.GetSentenceScore(sref, srec).second;
v_bleu_score.push_back(score);
}
k++;
}
// substitute the sampled responses as training targets
new_turn = turn;
for (size_t k = 0; k < nutt; k++)
{
new_turn[k].second = res[k];
}
/// get errors from the decoded results
if (turn_id == 0)
{
i_err = am.build_graph(new_turn, cg);
}
else
{
i_err = am.build_graph(new_prv_turn, new_turn, cg);
}
}
else{
/// get errors from the true reference
if (turn_id == 0)
{
i_err = am.build_graph(turn, cg);
}
else
{
i_err = am.build_graph(prv_turn, turn, cg);
}
}
if (do_sampling)
{
for (size_t k = 0; k < nutt; k++)
{
Expression t_err = i_err[k];
v_errs.push_back(t_err * (v_bleu_score[k] - reward_baseline)); /// multiply with reward
}
}
else
{
for (auto &p : i_err)
v_errs.push_back(p);
}
prv_turn = turn;
new_prv_turn = new_turn;
turn_id++;
i_turns++;
}
// single forward evaluation and (optionally) one backward pass for the
// summed error of all turns
Expression i_total_err = sum(v_errs);
dloss += as_scalar(cg.get_value(i_total_err));
dchars_s += am.swords;
dchars_t += am.twords;
if (sgd != nullptr && update_model)
{
cg.backward();
sgd->update(am.twords);
}
}
/// Segmental (per-turn) variant of REINFORCE training: every turn builds its
/// own computation graph and, when `sgd` is given, performs its own backward
/// pass and parameter update. Sampling (when enabled by the same coin flip as
/// in the non-segmental variant) is done in a separate graph `cg_sampling`
/// with the frozen mirror model `am_mirrow`. Per-turn losses and word counts
/// are accumulated into `scores`.
/// @param am        model being trained
/// @param am_mirrow frozen model used only to sample responses
/// @param v_v_dialogues batched dialogue; each PTurn holds nutt utterances
/// @param sgd       optimizer; applied per turn when non-null and update_model
/// @param reward_baseline subtracted from the reward to reduce variance
/// @param threshold_prob_for_sampling sampling is used iff a uniform draw
///        is >= this value
/// @param scores    accumulator for training losses and word counts
template <class AM_t>
void TrainProcess<AM_t>::REINFORCE_segmental_forward_backward(AM_t &am, AM_t &am_mirrow, PDialogue &v_v_dialogues, int nutt, Trainer* sgd, Dict& sd, cnn::real reward_baseline, cnn::real threshold_prob_for_sampling, TrainingScores *scores, bool update_model)
{
size_t turn_id = 0;
size_t i_turns = 0;
PTurn prv_turn, new_turn, new_prv_turn;
BleuMetric bleuScore;
bleuScore.Initialize();
IDFMetric idfScore(mv_idf);
// one coin flip decides sampling for the WHOLE dialogue
bool do_sampling = false;
cnn::real rng_value = rand() / (RAND_MAX + 0.0);
if (rng_value >= threshold_prob_for_sampling)
{
do_sampling = true;
}
am.reset();
am_mirrow.reset();
/// train on two segments of a dialogue
vector<cnn::real> v_bleu_score;
for (auto &turn : v_v_dialogues)
{
if (do_sampling)
{
vector<Sentence> res;
// decoding happens in its own graph, separate from the learning graph
ComputationGraph cg_sampling;
vector<Sentence> v_input, v_prv_response;
v_bleu_score.clear();
for (auto& p : turn)
{
v_input.push_back(p.first);
}
for (auto&p : prv_turn)
{
v_prv_response.push_back(p.second);
}
if (turn_id == 0)
{
res = am_mirrow.batch_decode(v_input, cg_sampling, sd);
}
else
{
res = am_mirrow.batch_decode(v_prv_response, v_input, cg_sampling, sd);
}
// one reward per utterance: BLEU, or IDF when reinforceIDF > 0
size_t k = 0;
for (auto &q : res)
{
if (reinforceIDF <= 0)
{
vector<string> sref, srec;
if (verbose) cout << "ref response: ";
for (auto p : turn[k].second){
if (verbose) cout << sd.Convert(p) << " ";
sref.push_back(sd.Convert(p));
}
if (verbose) cout << endl;
srec.clear();
if (verbose) cout << "res response: ";
for (auto p : q){
if (verbose) cout << sd.Convert(p) << " ";
srec.push_back(sd.Convert(p));
}
if (verbose) cout << endl;
cnn::real score;
score = bleuScore.GetSentenceScore(sref, srec);
v_bleu_score.push_back(score);
}
else
{
vector<int> sref, srec;
if (verbose) cout << "ref response: ";
for (auto p : turn[k].second){
if (verbose) cout << sd.Convert(p) << " ";
sref.push_back(p);
}
if (verbose) cout << endl;
srec.clear();
if (verbose) cout << "res response: ";
for (auto p : q){
if (verbose) cout << sd.Convert(p) << " ";
srec.push_back(p);
}
if (verbose) cout << endl;
cnn::real score;
score = idfScore.GetSentenceScore(sref, srec).second;
v_bleu_score.push_back(score);
}
k++;
}
/// use the decoded results as training signals
/// reward uses either BLEU or IDF scores. these scores are associated with the decoded results
/// training will encourage high BLEU or IDF scores, given these decoded results
/// notice that BLEU score is a measure against true reference. therefore, the higher the BLEU
/// score, the closer or the better the decoded sequence is aligned to the reference.
/// however, for IDF score, the highest IDF scores may correspond to many rare words that
/// don't make sense.
/// therefore, for IDF reward, we should use true reference as training signal
/// in this case, the system is trained with references that have larger IDF values
new_turn = turn;
if (reinforceIDF <= 0)
{
/// this corresponds to using BLEU score, so should use decoded signal to encourage
/// learn from decoded context in order to generate high BLEU score outputs
for (size_t k = 0; k < nutt; k++)
{
new_turn[k].second = res[k];
}
}
else
{
/// need to keep using the reference signals, as
/// high IDF doesn't mean good outputs
}
}
/// graph for learning
ComputationGraph cg;
vector<Expression> v_errs; /// the errors to be minimized
vector<Expression> i_err;
/// get errors from the decoded results
if (do_sampling)
{
if (turn_id == 0)
{
i_err = am.build_graph(new_turn, cg);
}
else
{
i_err = am.build_graph(new_prv_turn, new_turn, cg);
}
}
else{
/// get errors from the true reference
if (turn_id == 0)
{
i_err = am.build_graph(turn, cg);
}
else
{
i_err = am.build_graph(prv_turn, turn, cg);
}
}
if (do_sampling)
{
// weight each utterance's error by its centered reward
for (size_t k = 0; k < nutt; k++)
{
Expression t_err = i_err[k];
v_errs.push_back(t_err * (v_bleu_score[k] - reward_baseline)); /// multiply with reward
}
}
else
{
for (auto &p : i_err)
v_errs.push_back(p);
}
// per-turn forward, backward and update
Expression i_total_err = sum(v_errs);
Tensor tv = cg.get_value(i_total_err);
if (sgd != nullptr && update_model)
{
cg.backward();
sgd->update(am.twords);
}
prv_turn = turn;
new_prv_turn = new_turn;
turn_id++;
i_turns++;
TensorTools::PushElementsToMemory(scores->training_score_current_location,
scores->training_score_buf_size,
scores->training_scores,
tv);
scores->swords += am.swords;
scores->twords += am.twords;
}
}
/// Run every turn of the batched dialogue through ONE computation graph,
/// accumulate the model's cross-entropy (am.s2txent) into `scores`, and, when
/// an optimizer is supplied, perform a single backward pass and update.
/// @param v_v_dialogues batched dialogue turns
/// @param scores        accumulator for loss values and word counts
/// @param resetmodel    reset the model state before the first turn
/// @param init_turn_id  index of the first turn (0 means no previous context)
/// @param sgd           optimizer; no update when null
template <class AM_t>
void TrainProcess<AM_t>::nosegmental_forward_backward(Model &model, AM_t &am, PDialogue &v_v_dialogues, int nutt, TrainingScores* scores, bool resetmodel, int init_turn_id, Trainer* sgd)
{
    ComputationGraph cg;
    if (resetmodel)
        am.reset();

    PTurn previous_turn;
    size_t t = init_turn_id;
    size_t turns_done = 0;
    for (auto turn : v_v_dialogues)
    {
        // the first-ever turn has no dialogue history to condition on
        if (t == 0)
            am.build_graph(turn, cg);
        else
            am.build_graph(previous_turn, turn, cg);
        previous_turn = turn;
        ++t;
        ++turns_done;
    }

    Tensor xent_value = cg.get_value(am.s2txent.i);
    TensorTools::PushElementsToMemory(scores->training_score_current_location,
        scores->training_score_buf_size,
        scores->training_scores, xent_value);
    scores->swords += am.swords;
    scores->twords += am.twords;

    if (sgd != nullptr)
    {
        cg.backward();
        sgd->update(am.twords);
    }
}
/// Per-turn (segmental) training: each turn gets its own computation graph,
/// its cross entropy is recorded into `scores`, and, when `sgd` is supplied,
/// a backward pass and parameter update are executed for that turn before
/// moving on to the next one.
/// @param v_v_dialogues  batched dialogue turns
/// @param scores         accumulator for loss values and word counts
/// @param resetmodel     reset the model state before every turn when true
/// @param doGradientCheck run CheckGrad after a 3-turn burn-in
/// @param sgd            optimizer; no update when null
template <class AM_t>
void TrainProcess<AM_t>::segmental_forward_backward(Model &model, AM_t &am, PDialogue &v_v_dialogues, int nutt, TrainingScores * scores, bool resetmodel, bool doGradientCheck, Trainer* sgd)
{
size_t turn_id = 0;
size_t i_turns = 0;
PTurn prv_turn;
if (verbose)
cout << "start segmental_forward_backward" << endl;
for (auto turn : v_v_dialogues)
{
ComputationGraph cg;
if (resetmodel)
{
am.reset();
}
if (turn_id == 0)
{
am.build_graph(turn, cg);
}
else
{
// condition on the previous turn's utterances
am.build_graph(prv_turn, turn, cg);
}
if (verbose) cout << "after graph build" << endl;
if (doGradientCheck
&& turn_id > 3 // do gradient check after burn-in
)
CheckGrad(model, cg);
// forward evaluation of the turn's cross entropy
Tensor tv = cg.get_value(am.s2txent.i);
TensorTools::PushElementsToMemory(scores->training_score_current_location,
scores->training_score_buf_size,
scores->training_scores,
tv);
if (sgd != nullptr)
{
if (verbose)
cout << " start backprop " << endl;
cg.backward();
if (verbose)
cout << " done backprop " << endl;
sgd->update(am.twords);
if (verbose)
cout << " done update" << endl;
}
scores->swords += am.swords;
scores->twords += am.twords;
prv_turn = turn;
turn_id++;
i_turns++;
}
}
/// Per-turn training with an additional TF-IDF context feature.
/// For every turn a normalized TF-IDF vector is computed (via ptr_tfidfScore)
/// over the accumulated dialogue context of each utterance and passed to
/// build_graph as an extra input. Otherwise behaves like
/// segmental_forward_backward: one graph, forward, optional backward/update
/// per turn.
/// @param v_v_dialogues  batched dialogue turns
/// @param scores         accumulator for loss values and word counts
/// @param resetmodel     reset the model state before every turn when true
/// @param doGradientCheck run CheckGrad after a 3-turn burn-in
/// @param sgd            optimizer; no update when null
template <class AM_t>
void TrainProcess<AM_t>::segmental_forward_backward_with_additional_feature(Model &model, AM_t &am, PDialogue &v_v_dialogues, int nutt, TrainingScores * scores, bool resetmodel, bool doGradientCheck, Trainer* sgd)
{
size_t turn_id = 0;
size_t i_turns = 0;
PTurn prv_turn;
// running dialogue context per utterance; only .first (the growing word
// sequence) is consumed by the TF-IDF statistics below
PTurn prv_turn_tfidf;
if (verbose)
cout << "start segmental_forward_backward" << endl;
for (auto turn : v_v_dialogues)
{
ComputationGraph cg;
if (resetmodel)
{
am.reset();
}
/// assign context
// first turn: seed the context with the current turn (its .first);
// later turns: append the new user side to the accumulated context
if (prv_turn_tfidf.size() == 0)
prv_turn_tfidf = turn;
else{
for (int u = 0; u < nutt; u++)
{
prv_turn_tfidf[u].first.insert(prv_turn_tfidf[u].first.end(), turn[u].first.begin(), turn[u].first.end());
}
}
// normalized TF-IDF vector of the accumulated context, one per utterance
vector<vector<cnn::real>> reftfidf_context;
for (int u = 0; u < nutt; u++)
{
vector<cnn::real> reftfidf = ptr_tfidfScore->GetStats(prv_turn_tfidf[u].first);
normalize(reftfidf);
reftfidf_context.push_back(reftfidf);
}
if (turn_id == 0)
{
am.build_graph(turn, reftfidf_context, cg);
}
else
{
am.build_graph(prv_turn, turn, reftfidf_context, cg);
}
if (verbose) cout << "after graph build" << endl;
Tensor tv = cg.get_value(am.s2txent.i);
TensorTools::PushElementsToMemory(scores->training_score_current_location,
scores->training_score_buf_size,
scores->training_scores,
tv);
if (doGradientCheck
&& turn_id > 3 // do gradient check after burn-in
)
CheckGrad(model, cg);
if (sgd != nullptr)
{
if (verbose)
cout << " start backprop " << endl;
cg.backward();
if (verbose)
cout << " done backprop " << endl;
sgd->update(am.twords);
if (verbose)
cout << " done update" << endl;
}
scores->swords += am.swords;
scores->twords += am.twords;
/// append this turn to context
// the reference response of this turn also becomes part of the context
for (int i = 0; i < nutt; i++)
{
prv_turn_tfidf[i].first.insert(prv_turn_tfidf[i].first.end(), turn[i].second.begin(), turn[i].second.end());
}
prv_turn = turn;
turn_id++;
i_turns++;
}
}
/**
return hit at rank0 (top-1) and hit within rank4 (top-5)
*/
template <class AM_t>
/// Train a response-ranking model with a REINFORCE-style update.
/// For every turn it (1) scores the reference (correct) response, (2) scores each
/// negative candidate from csls plus the reference again, back-propagating a reward
/// whenever a negative candidate beats the reference, and (3) accumulates top-1 /
/// top-5 hit counts of the reference among all candidates.
/// @param v_v_dialogues dialogue turns, parallel across nutt utterances
/// @param csls negative candidate responses, one candidate list per turn
/// @param nutt number of parallel utterances per turn
/// @param resetmodel if true, reset the model state before each graph build
/// @param sgd when non-null, gradients are applied (training); null means score only
/// @return (top-1 hits, top-5 hits) accumulated over all turns and utterances
/// NOTE(review): the declared return type is pair<cnn::real, cnn::real> while the
/// sibling ranking routines return pair<unsigned, unsigned>; the caller assigns the
/// result to pair<unsigned, unsigned>, so the counts survive the round-trip, but the
/// types should probably be unified (needs the class declaration to change too).
pair<cnn::real, cnn::real> TrainProcess<AM_t>::segmental_forward_backward_ranking(Model &model, AM_t &am, PDialogue &v_v_dialogues, CandidateSentencesList &csls, int nutt, TrainingScores * scores, bool resetmodel, bool doGradientCheck, Trainer* sgd)
{
    size_t turn_id = 0;
    size_t i_turns = 0;
    unsigned hits_top_5 = 0, hits_top_1 = 0;
    size_t num_candidate = csls[0].size();
    IDFMetric idfScore(mv_idf);
    PTurn prv_turn;
    if (verbose)
        cout << "start segmental_forward_backward" << endl;
    /// hidden state snapshot taken after processing the reference response; used to
    /// condition the next turn on the correct-response history
    vector<vector<cnn::real>> correct_response_state;
    vector<vector<cnn::real>> prv_turn_correct_response_state;
    /// NOTE(review): costs and correct_response_costs are declared OUTSIDE the turn
    /// loop, so candidate scores accumulate across turns while the ranking below
    /// always searches for index num_candidate; the sibling segmental_forward_ranking
    /// re-declares costs per turn — confirm whether this accumulation is intended.
    vector<vector<cnn::real>> costs(nutt, vector<cnn::real>(0));
    vector<vector<cnn::real>> correct_response_costs(nutt, vector<cnn::real>(0));
    size_t idx = 0;
    for (auto turn : v_v_dialogues)
    {
        auto turn_back = turn;  /// keep the reference responses; turn.second is overwritten per candidate
        /// first compute likelihoods from the correct paths
        {
            vector<Expression> v_errs;
            ComputationGraph cg;
            if (resetmodel)
            {
                am.reset();
            }
            if (turn_id == 0)
            {
                v_errs = am.build_graph(turn, cg);
            }
            else
            {
                am.copy_external_memory_to_cxt(cg, nutt, prv_turn_correct_response_state); /// reset state to that corresponding to the correct response history for negative responses
                /// because this turn is dependent on the previous turn that is with the correct response
                v_errs = am.build_graph(prv_turn, turn, cg);
            }
            for (size_t err_idx = 0; err_idx < v_errs.size(); err_idx++)
            {
                Tensor tv = cg.get_value(v_errs[err_idx]);
                /// length-normalized negative log-likelihood of the reference response
                cnn::real lc = TensorTools::AccessElement(tv, 0) / turn[err_idx].second.size();
                cnn::real score = lc;
                correct_response_costs[err_idx].push_back(score);
            }
        }
        /// compute positive and negative sample's likelihoods; slot i == num_candidate
        /// re-scores the reference response so it participates in the ranking
        for (int i = 0; i < num_candidate + 1; i++)
        {
            vector<Expression> v_errs;
            if (i < num_candidate)
            {
                for (size_t ii = 0; ii < nutt; ii++)
                    turn[ii].second = csls[idx][i];
            }
            else
            {
                for (size_t ii = 0; ii < nutt; ii++)
                    turn[ii].second = turn_back[ii].second;
            }
            ComputationGraph cg;
            if (resetmodel)
            {
                am.reset();
            }
            if (turn_id == 0)
            {
                v_errs = am.build_graph(turn, cg);
            }
            else
            {
                am.copy_external_memory_to_cxt(cg, nutt, prv_turn_correct_response_state); /// reset state to that corresponding to the correct response history for negative responses
                /// because this turn is dependent on the previous turn that is with the correct response
                v_errs = am.build_graph(prv_turn, turn, cg);
            }
            if (verbose) cout << "after graph build" << endl;
            for (size_t err_idx = 0; err_idx < v_errs.size(); err_idx++)
            {
                Tensor tv = cg.get_value(v_errs[err_idx]);
                cnn::real score = TensorTools::AccessElement(tv, 0) / turn[err_idx].second.size();
                costs[err_idx].push_back(score);
            }
            if (sgd != nullptr)
            {
                /// compute average cost differences: penalize candidates that score
                /// better (lower cost) than the reference response
                cnn::real cost_penalty = 0;
                int ndif = 0;
                for (size_t kk = 0; kk < v_errs.size(); kk++)
                {
                    if (verbose)
                        cout << "c: " << correct_response_costs[kk].back() << " neg: " << costs[kk].back() << ' ';
                    cnn::real dif = correct_response_costs[kk].back() - costs[kk].back();
                    if (dif > 0)
                    {
                        ndif++;
                        cost_penalty += dif;
                    }
                }
                if (verbose)
                    cout << endl;
                /// update when a negative sample beats the reference, and always for
                /// the reference slot itself (i == num_candidate)
                if (cost_penalty > 0 || i == num_candidate)
                {
                    if (verbose)
                        cout << " start backprop " << endl;
                    cg.backward();
                    if (verbose)
                        cout << " done backprop " << endl;
                    cnn::real reward = 0.0;
                    if (cost_penalty > 0 && i != num_candidate && num_candidate > 0)
                    {
                        reward = - ndif / ((cnn::real)v_errs.size()); /// the penalty is proportional to the number of
                        /// samples that have lower cost than the positive sample.
                        reward /= (cnn::real)num_candidate; /// normalize to the number of candidates
                        /// so that if all are wrong, the scale it adds is at most -1.0
                    }
                    else if (i==num_candidate)
                    {
                        assert(i == num_candidate);
                        reward = 1.0; /// this is the case of positive sample
                    }
                    if (verbose)
                        cout << "update model using reward " << cost_penalty << endl;
                    sgd->update(am.twords, reward); /// reinforce learning
                    if (verbose)
                        cout << " done update" << endl;
                }
                else{
                    if (verbose)
                        cout << "no need to update models" << endl;
                }
            }
            if (i == num_candidate)
            {
                /// this is the context with the correct responses history
                am.serialise_cxt_to_external_memory(cg, correct_response_state);
            }
        }
        prv_turn_correct_response_state = correct_response_state;
        /// rank the reference (pushed at index num_candidate) among all candidates
        for (size_t i = 0; i < costs.size(); i++)
        {
            vector<size_t> sorted_idx = sort_indexes<cnn::real>(costs[i]);
            vector<size_t>::iterator iter = find(sorted_idx.begin(), sorted_idx.end(), num_candidate);
            if (distance(iter, sorted_idx.end()) == 1)
            {
                hits_top_1++;
            }
            if (distance(iter, sorted_idx.end()) <= 5)
            {
                hits_top_5++;
            }
        }
        prv_turn = turn;
        turn_id++;
        i_turns++;
        idx++;
    }
    return make_pair(hits_top_1, hits_top_5);
}
/**
return hit at rank0 (top-1) and hit within rank4 (top-5).
Evaluation-only ranking: scores every candidate response with the model and,
optionally (weight_IDF > 0), mixes in tf-idf cosine similarity against the
accumulated dialogue context kept in prv_turn_tfidf.
*/
template <class AM_t>
/// @param v_v_dialogues dialogue turns, parallel across nutt utterances
/// @param csls negative candidate lists; must have MAX_NUMBER_OF_CANDIDATES entries
/// @param resetmodel if true, reset model state before each graph build
/// @return (top-1 hits, top-5 hits) of the reference response over all turns/utterances
pair<unsigned, unsigned> TrainProcess<AM_t>::segmental_forward_ranking(Model &model, AM_t &am, PDialogue &v_v_dialogues, CandidateSentencesList &csls, int nutt, TrainingScores * scores, bool resetmodel, bool doGradientCheck, Trainer* sgd)
{
    size_t turn_id = 0;
    size_t i_turns = 0;
    unsigned hits_top_5 = 0, hits_top_1 = 0;
    size_t num_candidate = MAX_NUMBER_OF_CANDIDATES;
    vector<Expression> v_errs;
    IDFMetric idfScore(mv_idf);
    PTurn prv_turn;
    /// running dialogue context (word ids) used only for the tf-idf similarity term
    PTurn prv_turn_tfidf;
    if (verbose)
        cout << "start segmental_forward_backward" << endl;
    /// the negative candidate number should match to that expected
    assert(MAX_NUMBER_OF_CANDIDATES == csls[0].size());
    vector<vector<cnn::real>> correct_response_state;
    vector<vector<cnn::real>> prv_turn_correct_response_state;
    size_t idx = 0;
    for (auto turn : v_v_dialogues)
    {
        auto turn_back = turn;  /// keep reference responses; turn.second is overwritten per candidate
        vector<vector<cnn::real>> costs(nutt, vector<cnn::real>(0));
        vector<vector<cnn::real>> reftfidf_context;
        /// assign context: extend the tf-idf context with this turn's source side
        if (weight_IDF > 0)
        {
            if (prv_turn_tfidf.size() == 0)
                prv_turn_tfidf = turn;
            else{
                for (int u = 0; u < nutt; u++)
                {
                    prv_turn_tfidf[u].first.insert(prv_turn_tfidf[u].first.end(), turn[u].first.begin(), turn[u].first.end());
                }
            }
            for (int u = 0; u < nutt; u++)
            {
                vector<cnn::real> reftfidf = ptr_tfidfScore->GetStats(prv_turn_tfidf[u].first);
                reftfidf_context.push_back(reftfidf);
            }
        }
        /// slots 0..num_candidate-1 are negatives; slot num_candidate is the reference
        for (int i = 0; i < num_candidate + 1; i++)
        {
            if (i < num_candidate)
            {
                for (size_t ii = 0; ii < nutt; ii++)
                    turn[ii].second = csls[idx][i];
            }
            else
            {
                for (size_t ii = 0; ii < nutt; ii++)
                    turn[ii].second = turn_back[ii].second;
            }
            ComputationGraph cg;
            if (resetmodel)
            {
                am.reset();
            }
            if (turn_id == 0)
            {
                /// NOTE(review): unlike the backward variant, the context is copied even
                /// on the first turn (prv_turn_correct_response_state is empty here) —
                /// presumably a no-op; confirm copy_external_memory_to_cxt tolerates it
                am.copy_external_memory_to_cxt(cg, nutt, prv_turn_correct_response_state);
                v_errs = am.build_graph(turn, cg);
            }
            else
            {
                am.copy_external_memory_to_cxt(cg, nutt, prv_turn_correct_response_state); /// reset state to that corresponding to the correct response history for negative responses
                /// because this turn is dependent on the previous turn that is with the correct response
                v_errs = am.build_graph(prv_turn, turn, cg);
            }
            if (verbose) cout << "after graph build" << endl;
            for (size_t err_idx = 0; err_idx < v_errs.size(); err_idx++)
            {
                Tensor tv = cg.get_value(v_errs[err_idx]);
                /// length-normalized model cost of this candidate
                cnn::real lc = TensorTools::AccessElement(tv, 0) / turn[err_idx].second.size();
                cnn::real score = lc;
                if (weight_IDF > 0.0 && ptr_tfidfScore != nullptr)
                {
                    vector<cnn::real> hyptfidf = ptr_tfidfScore->GetStats(turn[err_idx].second);
                    /// compute cosine similarity
                    cnn::real sim = cnn::metric::cosine_similarity(reftfidf_context[err_idx], hyptfidf);
                    /// interpolate model cost with (negated) tf-idf similarity
                    score = (1 - weight_IDF) * lc - weight_IDF * sim;
                }
#ifdef RANKING_COMBINE_IDF
                cnn::real idf_score = idfScore.GetStats(turn[err_idx].first, turn[err_idx].second).second / turn[err_idx].second.size();
                score = (1 - weight_IDF) * lc - weight_IDF * idf_score;
#endif
                costs[err_idx].push_back(score);
            }
            if (i == num_candidate)
            {
                /// this is the context with the correct responses history
                am.serialise_cxt_to_external_memory(cg, correct_response_state);
            }
        }
        prv_turn_correct_response_state = correct_response_state;
        /// rank the reference (index num_candidate) among all candidates
        for (size_t i = 0; i < costs.size(); i++)
        {
            vector<size_t> sorted_idx = sort_indexes<cnn::real>(costs[i]);
            vector<size_t>::iterator iter = find(sorted_idx.begin(), sorted_idx.end(), num_candidate);
            if (distance(iter, sorted_idx.end()) == 1)
            {
                hits_top_1++;
            }
            if (distance(iter, sorted_idx.end()) <= 5)
            {
                hits_top_5++;
            }
        }
        /// append the reference response to the tf-idf context for the next turn
        if (weight_IDF > 0.0 && ptr_tfidfScore != nullptr)
        {
            for (int u = 0; u < nutt; u++)
            {
                prv_turn_tfidf[u].first.insert(prv_turn_tfidf[u].first.end(), turn[u].second.begin(), turn[u].second.end());
            }
        }
        prv_turn = turn;
        turn_id++;
        i_turns++;
        idx++;
    }
    return make_pair(hits_top_1, hits_top_5);
}
/**
return hit at rank0 (top-1) and hit within rank4 (top-5)
using tf-idf only (no neural scoring): each candidate response is ranked by the
cosine similarity between its tf-idf vector and the tf-idf vector of the dialogue
context accumulated so far.
*/
template <class AM_t>
/// @param model / am / scores / resetmodel / doGradientCheck / sgd are unused here;
///        they are kept so the signature matches the other ranking routines
/// @param v_v_dialogues dialogue turns, parallel across nutt utterances
/// @param csls negative candidate lists; must have MAX_NUMBER_OF_CANDIDATES entries
/// @return (top-1 hits, top-5 hits) of the reference response over all turns/utterances
pair<unsigned, unsigned> TrainProcess<AM_t>::segmental_forward_ranking_using_tfidf(Model &model, AM_t &am, PDialogue &v_v_dialogues, CandidateSentencesList &csls, int nutt, TrainingScores * scores, bool resetmodel, bool doGradientCheck, Trainer* sgd)
{
    /// turn counters kept for symmetry with the sibling ranking routines
    size_t turn_id = 0;
    size_t i_turns = 0;
    unsigned hits_top_5 = 0, hits_top_1 = 0;
    size_t num_candidate = MAX_NUMBER_OF_CANDIDATES;
    /// (an unused local TFIDFMetric was removed; scoring uses the member ptr_tfidfScore)
    PTurn prv_turn;
    if (verbose)
        cout << "start segmental_forward_backward" << endl;
    /// the negative candidate number should match to that expected
    assert(MAX_NUMBER_OF_CANDIDATES == csls[0].size());
    size_t idx = 0;
    for (auto turn : v_v_dialogues)
    {
        auto turn_back = turn;  /// keep reference responses; turn.second is overwritten per candidate
        vector<vector<cnn::real>> costs(nutt, vector<cnn::real>(0));
        /// assign context: prv_turn accumulates the dialogue history (source sides
        /// plus previously chosen reference responses)
        if (prv_turn.size() == 0)
            prv_turn = turn;
        else{
            for (int u = 0; u < nutt; u++)
            {
                prv_turn[u].first.insert(prv_turn[u].first.end(), turn[u].first.begin(), turn[u].first.end());
            }
        }
        /// all candidates have the same context
        vector<vector<cnn::real>> reftfidf_context;
        for (int u = 0; u < nutt; u++)
        {
            vector<cnn::real> reftfidf = ptr_tfidfScore->GetStats(prv_turn[u].first);
            reftfidf_context.push_back(reftfidf);
        }
        /// slots 0..num_candidate-1 are negatives; slot num_candidate is the reference
        for (size_t i = 0; i < num_candidate + 1; i++)
        {
            if (i < num_candidate)
            {
                for (size_t ii = 0; ii < nutt; ii++)
                    turn[ii].second = csls[idx][i];
            }
            else
            {
                for (size_t ii = 0; ii < nutt; ii++)
                    turn[ii].second = turn_back[ii].second;
            }
            for (int u = 0; u < nutt; u++)
            {
                vector<cnn::real> hyptfidf = ptr_tfidfScore->GetStats(turn[u].second);
                /// compute cosine similarity
                cnn::real sim = cnn::metric::cosine_similarity(reftfidf_context[u], hyptfidf);
                cnn::real score = -sim; /// negative of similarity is cost
                costs[u].push_back(score);
            }
        }
        /// rank the reference (pushed last, at index num_candidate) among all candidates
        for (size_t i = 0; i < costs.size(); i++)
        {
            vector<size_t> sorted_idx = sort_indexes<cnn::real>(costs[i]);
            vector<size_t>::iterator iter = find(sorted_idx.begin(), sorted_idx.end(), num_candidate);
            if (distance(iter, sorted_idx.end()) == 1)
            {
                hits_top_1++;
            }
            if (distance(iter, sorted_idx.end()) <= 5)
            {
                hits_top_5++;
            }
        }
        /// append this turn's reference response to the context
        for (int i = 0; i < nutt; i++)
        {
            prv_turn[i].first.insert(prv_turn[i].first.end(), turn[i].second.begin(), turn[i].second.end());
        }
        turn_id++;
        i_turns++;
        idx++;
    }
    return make_pair(hits_top_1, hits_top_5);
}
/**
Train with REINFORCE algorithm.
Runs max_epochs passes over the training corpus, drawing same-length dialogue
batches, and periodically evaluates on the dev set; the best dev model is saved
and the learning rate is halved when the dev cost fails to improve.
@param largest_cost in/out: best (smallest) smoothed dev cost seen so far
@param reward_baseline baseline subtracted from the reward in the REINFORCE update
@param threshold_prob_for_sampling clamped to [0, 1] before use
*/
template <class AM_t>
void TrainProcess<AM_t>::REINFORCEtrain(Model &model, AM_t &am, AM_t &am_agent_mirrow, Corpus &training, Corpus &devel, Trainer &sgd, string out_file, Dict & td, int max_epochs, int nparallel, cnn::real& largest_cost, cnn::real reward_baseline, cnn::real threshold_prob_for_sampling)
{
    unsigned report_every_i = 50;
    unsigned dev_every_i_reports = 1000;
    unsigned si = training.size(); /// number of dialogues in training
    vector<unsigned> order(training.size());
    for (unsigned i = 0; i < order.size(); ++i) order[i] = i;
    threshold_prob_for_sampling = min<cnn::real>(1.0, max<cnn::real>(0.0, threshold_prob_for_sampling)); /// normalize to [0.0, 1.0]
    bool first = true;
    int report = 0;
    unsigned lines = 0;
    save_cnn_model(out_file, &model);
    int prv_epoch = -1;
    vector<bool> v_selected(training.size(), false); /// track if a dialogue is used
    size_t i_stt_diag_id = 0;
    while (sgd.epoch < max_epochs) {
        Timer iteration("completed in");
        cnn::real dloss = 0;
        cnn::real dchars_s = 0;
        cnn::real dchars_t = 0;
        for (unsigned iter = 0; iter < report_every_i;) {
            if (si == training.size()) {
                si = 0;
                if (first) { first = false; }
                else { sgd.update_epoch(); }
            }
            if (si % order.size() == 0) {
                cerr << "**SHUFFLE\n";
                /// shuffle number of turns
                shuffle(training_numturn2did.vNumTurns.begin(), training_numturn2did.vNumTurns.end(), *rndeng);
                i_stt_diag_id = 0;
                v_selected = vector<bool>(training.size(), false);
                /// shuffle dialogues with the same number of turns.
                /// BUGFIX: iterate by reference — the old `for (auto p : ...)` copied
                /// each map entry, so the shuffle mutated a temporary and had no effect.
                /// Also use shuffle(*rndeng) like the rest of the file instead of the
                /// deprecated random_shuffle.
                for (auto& p : training_numturn2did.mapNumTurn2DialogId){
                    shuffle(p.second.begin(), p.second.end(), *rndeng);
                }
            }
            Dialogue prv_turn;
            PDialogue v_dialogues; // dialogues are organized in each turn; in each turn, there are parallel data from all speakers
            vector<int> i_sel_idx = get_same_length_dialogues(training, nparallel, i_stt_diag_id, v_selected, v_dialogues, training_numturn2did);
            size_t nutt = i_sel_idx.size();
            REINFORCE_nosegmental_forward_backward(model, am, am_agent_mirrow, v_dialogues, nutt, dloss, dchars_s, dchars_t, &sgd, td, reward_baseline, threshold_prob_for_sampling);
            si += nutt;
            lines += nutt;
            iter += nutt;
        }
        sgd.status();
        cerr << "\n***Train [epoch=" << (lines / (cnn::real)training.size()) << "] E = " << (dloss / dchars_t) << " ppl=" << exp(dloss / dchars_t) << ' ';
        // show score on dev data?
        report++;
        if (floor(sgd.epoch) != prv_epoch || report % dev_every_i_reports == 0 || fmod(lines, (cnn::real)training.size()) == 0.0) {
            cnn::real ddloss = 0;
            cnn::real ddchars_s = 0;
            cnn::real ddchars_t = 0;
            vector<bool> vd_selected(devel.size(), false); /// track if a dialogue is used
            size_t id_stt_diag_id = 0;
            PDialogue vd_dialogues; // dialogues are organized in each turn; in each turn, there are parallel data from all speakers
            vector<int> id_sel_idx = get_same_length_dialogues(devel, NBR_DEV_PARALLEL_UTTS, id_stt_diag_id, vd_selected, vd_dialogues, devel_numturn2did);
            size_t ndutt = id_sel_idx.size();
            while (ndutt > 0)
            {
                /// the cost is -(r - r_baseline) * log P
                /// for small P, but with large r, the cost is high, so to reduce it, it generates large gradient as this event corresponds to low probability but high reward
                REINFORCE_nosegmental_forward_backward(model, am, am_agent_mirrow, vd_dialogues, ndutt, ddloss, ddchars_s, ddchars_t, nullptr, td, reward_baseline, 0.0, false);
                id_sel_idx = get_same_length_dialogues(devel, NBR_DEV_PARALLEL_UTTS, id_stt_diag_id, vd_selected, vd_dialogues, devel_numturn2did);
                ndutt = id_sel_idx.size();
            }
            ddloss = smoothed_ppl(ddloss, ppl_hist);
            if (ddloss < largest_cost) {
                largest_cost = ddloss;
                save_cnn_model(out_file, &model);
            }
            else{
                sgd.eta0 *= 0.5; /// reduce learning rate
                sgd.eta *= 0.5; /// reduce learning rate
            }
            cerr << "\n***DEV [epoch=" << (lines / (cnn::real)training.size()) << "] cost = " << (ddloss / ddchars_t) << " approximate ppl=" << exp(ddloss / ddchars_t) << ' ';
        }
        prv_epoch = floor(sgd.epoch);
    }
}
/* The following does multiple sentences per minibatch with the REINFORCE objective.
   @param sgd_update_epochs : if false, run exactly one pass over the data and return
          (report_every_i is stretched to the corpus size and the loop breaks after one
          iteration); if true, run until sgd.epoch reaches max_epochs
   @param b_inside_logic : evaluate on the dev set inside this loop (epoch change,
          periodic report, or corpus boundary); the best dev model is saved and the
          learning rate halved on regression
*/
template <class AM_t>
void TrainProcess<AM_t>::REINFORCE_batch_train(Model &model, AM_t &am, AM_t &am_agent_mirrow,
    Corpus &training, Corpus &devel,
    Trainer &sgd, Dict& td, string out_file, int max_epochs, int nparallel, cnn::real &best, bool segmental_training,
    bool sgd_update_epochs, bool do_gradient_check, bool b_inside_logic,
    cnn::real reward_baseline,
    cnn::real threshold_prob_for_sampling
    )
{
    if (verbose)
        cout << "batch_train: ";
    unsigned report_every_i = 50;
    unsigned dev_every_i_reports = 1000;
    unsigned si = training.size(); /// number of dialogues in training
    vector<unsigned> order(training.size());
    for (unsigned i = 0; i < order.size(); ++i) order[i] = i;
    bool first = true;
    int report = 0;
    unsigned lines = 0;
    if (b_inside_logic)
        reset_smoothed_ppl(ppl_hist);
    int prv_epoch = -1;
    vector<bool> v_selected(training.size(), false); /// track if a dialogue is used
    size_t i_stt_diag_id = 0;
    /// if no update of sgd in this function, need to train with all data in one pass and then return
    if (sgd_update_epochs == false)
    {
        report_every_i = training.size();
        si = 0;
    }
    while ((sgd_update_epochs && sgd.epoch < max_epochs) || /// run multiple passes of data
        (!sgd_update_epochs && si < training.size())) /// run one pass of the data
    {
        Timer iteration("completed in");
        training_set_scores->reset();
        PDialogue v_dialogues; // dialogues are organized in each turn; in each turn, there are parallel data from all speakers
        for (unsigned iter = 0; iter < report_every_i;) {
            if (si == training.size()) {
                si = 0;
                if (first) { first = false; }
                else if (sgd_update_epochs){
                    sgd.update_epoch();
                    lines -= training.size();
                }
            }
            if (si % order.size() == 0) {
                cerr << "**SHUFFLE\n";
                /// shuffle number of turns
                shuffle(training_numturn2did.vNumTurns.begin(), training_numturn2did.vNumTurns.end(), *rndeng);
                i_stt_diag_id = 0;
                v_selected = vector<bool>(training.size(), false);
                /// shuffle dialogues with the same number of turns.
                /// BUGFIX: iterate by reference — the old `for (auto p : ...)` copied
                /// each map entry, so the shuffle mutated a temporary and had no effect.
                /// Also use shuffle(*rndeng) like the rest of the file instead of the
                /// deprecated random_shuffle.
                for (auto& p : training_numturn2did.mapNumTurn2DialogId){
                    shuffle(p.second.begin(), p.second.end(), *rndeng);
                }
            }
            Dialogue prv_turn;
            vector<int> i_sel_idx = get_same_length_dialogues(training, nparallel, i_stt_diag_id, v_selected, v_dialogues, training_numturn2did);
            size_t nutt = i_sel_idx.size();
            if (nutt == 0)
                break;
            if (verbose)
            {
                cerr << "selected " << nutt << " : ";
                for (auto p : i_sel_idx)
                    cerr << p << " ";
                cerr << endl;
            }
            REINFORCE_segmental_forward_backward(am, am_agent_mirrow, v_dialogues, nutt, &sgd, td, reward_baseline, threshold_prob_for_sampling, training_set_scores, true);
            si += nutt;
            lines += nutt;
            iter += nutt;
        }
        training_set_scores->compute_score();
        sgd.status();
        iteration.WordsPerSecond(training_set_scores->twords + training_set_scores->swords);
        cerr << "\n***Train " << (lines / (cnn::real)training.size()) * 100 << " %100 of epoch[" << sgd.epoch << "] E = " << (training_set_scores->dloss / training_set_scores->twords) << " ppl=" << exp(training_set_scores->dloss / training_set_scores->twords) << ' ';
        /// sample a response for the first utterance of each turn as a qualitative check
        vector<SentencePair> vs;
        for (auto&p : v_dialogues)
            vs.push_back(p[0]);
        vector<SentencePair> vres;
        am.respond(vs, vres, sd);
        // show score on dev data?
        report++;
        if (b_inside_logic && devel.size() > 0 && (floor(sgd.epoch) != prv_epoch
            || (report % dev_every_i_reports == 0
            || fmod(lines, (cnn::real)training.size()) == 0.0)))
        {
            cnn::real ddloss = 0;
            cnn::real ddchars_s = 0;
            cnn::real ddchars_t = 0;
            ddloss = testPPL(model, am, devel, devel_numturn2did, out_file + ".dev.log", segmental_training, ddchars_s, ddchars_t);
            ddloss = smoothed_ppl(ddloss, ppl_hist);
            if (ddloss < best) {
                best = ddloss;
                save_cnn_model(out_file, &model);
            }
            else{
                sgd.eta0 *= 0.5; /// reduce learning rate
                sgd.eta *= 0.5; /// reduce learning rate
            }
            cerr << "\n***DEV [epoch=" << (lines / (cnn::real)training.size()) << "] E = " << (ddloss / ddchars_t) << " ppl=" << exp(ddloss / ddchars_t) << ' ';
        }
        prv_epoch = floor(sgd.epoch);
        if (sgd_update_epochs == false)
        {
            /// because there is no update on sgd epoch, this loop can run forever.
            /// so just run one iteration and quit
            break;
        }
        else{
            save_cnn_model(out_file + "e" + boost::lexical_cast<string>(sgd.epoch), &model);
        }
    }
}
/* The following does multiple sentences per minibatch (maximum-likelihood training).
   @param b_inside_logic : use logic inside of batch to do evaluation on the dev set;
          if it is false, do dev set evaluation only if sgd.epoch changes
   @param sgd_update_epochs : if false, run exactly one pass over the data and return
   @param b_do_padding : pad all input/output in each turn to the same length, using
          kEOS as the padding symbol (front for source, back for target)
   @param b_use_additional_feature : route training through the variant that consumes
          additional features
*/
template <class AM_t>
void TrainProcess<AM_t>::batch_train(Model &model, AM_t &am, Corpus &training, Corpus &devel,
    Trainer &sgd, string out_file, int max_epochs, int nparallel, cnn::real &best, bool segmental_training,
    bool sgd_update_epochs, bool do_gradient_check, bool b_inside_logic,
    bool b_do_padding, int kEOS, /// for padding if so use kEOS as the padding symbol
    bool b_use_additional_feature
    )
{
    if (verbose)
        cout << "batch_train: ";
    unsigned report_every_i = 50;
    unsigned dev_every_i_reports = 1000;
    unsigned si = training.size(); /// number of dialogues in training
    vector<unsigned> order(training.size());
    for (unsigned i = 0; i < order.size(); ++i) order[i] = i;
    bool first = true;
    int report = 0;
    unsigned lines = 0;
    if (b_inside_logic)
        reset_smoothed_ppl(ppl_hist);
    int prv_epoch = -1;
    vector<bool> v_selected(training.size(), false); /// track if a dialogue is used
    size_t i_stt_diag_id = 0;
    /// if no update of sgd in this function, need to train with all data in one pass and then return
    if (sgd_update_epochs == false)
    {
        report_every_i = training.size();
        si = 0;
    }
    while ((sgd_update_epochs && sgd.epoch < max_epochs) || /// run multiple passes of data
        (!sgd_update_epochs && si < training.size())) /// run one pass of the data
    {
        Timer iteration("completed in");
        training_set_scores->reset();
        PDialogue v_dialogues; // dialogues are organized in each turn; in each turn, there are parallel data from all speakers
        for (unsigned iter = 0; iter < report_every_i;) {
            if (si == training.size()) {
                si = 0;
                if (first) { first = false; }
                else if (sgd_update_epochs){
                    sgd.update_epoch();
                    lines -= training.size();
                }
            }
            if (si % order.size() == 0) {
                cerr << "**SHUFFLE\n";
                /// shuffle number of turns
                shuffle(training_numturn2did.vNumTurns.begin(), training_numturn2did.vNumTurns.end(), *rndeng);
                i_stt_diag_id = 0;
                v_selected = vector<bool>(training.size(), false);
                /// shuffle dialogues with the same number of turns.
                /// BUGFIX: iterate by reference — the old `for (auto p : ...)` copied
                /// each map entry, so the shuffle mutated a temporary and had no effect.
                /// Also use shuffle(*rndeng) like the rest of the file instead of the
                /// deprecated random_shuffle.
                for (auto& p : training_numturn2did.mapNumTurn2DialogId){
                    shuffle(p.second.begin(), p.second.end(), *rndeng);
                }
            }
            Dialogue prv_turn;
            vector<int> i_sel_idx = get_same_length_dialogues(training, nparallel, i_stt_diag_id, v_selected, v_dialogues, training_numturn2did);
            size_t nutt = i_sel_idx.size();
            if (nutt == 0)
                break;
            if (b_do_padding)
            {
                /// padding all input and output in each turn into same length with </s> symbol
                /// padding </s> to the front for source side
                /// padding </s> to the back for target side
                PDialogue pd = padding_with_eos(v_dialogues, kEOS, { false, true });
                v_dialogues = pd;
            }
            if (verbose)
            {
                cerr << "selected " << nutt << " : ";
                for (auto p : i_sel_idx)
                    cerr << p << " ";
                cerr << endl;
            }
            if (b_use_additional_feature)
            {
                segmental_forward_backward_with_additional_feature(model, am, v_dialogues, nutt, training_set_scores, false, do_gradient_check, &sgd);
            }
            else
            {
                if (segmental_training)
                    segmental_forward_backward(model, am, v_dialogues, nutt, training_set_scores, false, do_gradient_check, &sgd);
                else
                    nosegmental_forward_backward(model, am, v_dialogues, nutt, training_set_scores, true, 0, &sgd);
            }
            si += nutt;
            lines += nutt;
            iter += nutt;
        }
        training_set_scores->compute_score();
        sgd.status();
        iteration.WordsPerSecond(training_set_scores->twords + training_set_scores->swords);
        cerr << "\n***Train " << (lines / (cnn::real)training.size()) * 100 << " %100 of epoch[" << sgd.epoch << "] E = " << (training_set_scores->dloss / training_set_scores->twords) << " ppl=" << exp(training_set_scores->dloss / training_set_scores->twords) << ' ';
        /// sample a response for the first utterance of each turn as a qualitative check
        vector<SentencePair> vs;
        for (auto&p : v_dialogues)
            vs.push_back(p[0]);
        vector<SentencePair> vres;
        am.respond(vs, vres, sd);
        // show score on dev data?
        report++;
        if (b_inside_logic && devel.size() > 0 && (floor(sgd.epoch) != prv_epoch
            || (report % dev_every_i_reports == 0
            || fmod(lines, (cnn::real)training.size()) == 0.0)))
        {
            cnn::real ddloss = 0;
            cnn::real ddchars_s = 0;
            cnn::real ddchars_t = 0;
            ddloss = testPPL(model, am, devel, devel_numturn2did, out_file + ".dev.log", segmental_training, ddchars_s, ddchars_t);
            ddloss = smoothed_ppl(ddloss, ppl_hist);
            if (ddloss < best) {
                best = ddloss;
                save_cnn_model(out_file, &model);
            }
            else{
                sgd.eta0 *= 0.5; /// reduce learning rate
                sgd.eta *= 0.5; /// reduce learning rate
            }
            cerr << "\n***DEV [epoch=" << (lines / (cnn::real)training.size()) << "] E = " << (ddloss / ddchars_t) << " ppl=" << exp(ddloss / ddchars_t) << ' ';
        }
        prv_epoch = floor(sgd.epoch);
        if (sgd_update_epochs == false)
        {
            /// because there is no update on sgd epoch, this loop can run forever.
            /// so just run one iteration and quit
            break;
        }
        else{
            save_cnn_model(out_file + "e" + boost::lexical_cast<string>(sgd.epoch), &model);
        }
    }
}
/**
Train ranking models: for each batch of same-length dialogues, draw negative
candidate responses from the pool of all training responses and run the
REINFORCE-style ranking update; recall@1 / recall@5 statistics are logged per
turn length and per epoch, and a checkpoint is written every ~50000 lines.
@param model_out_fn file stem for model checkpoints
@param out_file log file for recall statistics
@param max_negative_samples number of negative candidates per turn
*/
template <class AM_t>
void TrainProcess<AM_t>::batch_train_ranking(Model &model, AM_t &am, size_t max_epochs, Corpus &train_corpus, string model_out_fn, string out_file, Dict & td, NumTurn2DialogId& train_corpusinfo, Trainer *sgd, int nparallel, int max_negative_samples)
{
    if (train_corpus.size() == 0)
    {
        cerr << "no data for training" << endl;
        return;
    }
    unsigned lines = 0;
    unsigned hits_top_1 = 0;
    unsigned hits_top_5 = 0;
    /// per-turn-length (top-1 hits, top-5 hits, lines) accumulators
    map<int, tuple<int, int, int>> acc_over_turn;
    ofstream of(out_file);
    int ilines_check_point = 0;
    Timer iteration("completed in");
    dev_set_scores->reset();
    /// get all responses from training set, these responses will be used as negative samples
    Sentences negative_responses = get_all_responses(train_corpus);
    vector<bool> vd_selected(train_corpus.size(), false); /// track if a dialogue is used
    size_t id_stt_diag_id = 0;
    PDialogue vd_dialogues; // dialogues are organized in each turn; in each turn, there are parallel data from all speakers
    vector<int> id_sel_idx = get_same_length_dialogues(train_corpus, nparallel, id_stt_diag_id, vd_selected, vd_dialogues, train_corpusinfo);
    size_t ndutt = id_sel_idx.size();
    lines += ndutt * vd_dialogues.size();
    long rand_pos = 100; /// avoid using the same starting point as that in test so that no overlaps between
    /// training and test responses candidate sequences
    CandidateSentencesList csls = get_candidate_responses(vd_dialogues, negative_responses, rand_pos, max_negative_samples);
    int train_epoch = 0;
    while (train_epoch < max_epochs)
    {
        hits_top_1 = 0;
        hits_top_5 = 0;
        acc_over_turn.clear();
        while (ndutt > 0)
        {
            pair<unsigned, unsigned> this_hit;
            this_hit = segmental_forward_backward_ranking(model, am, vd_dialogues, csls, ndutt, dev_set_scores, false, false, sgd);
            hits_top_1 += this_hit.first;
            hits_top_5 += this_hit.second;
            if (acc_over_turn.find(vd_dialogues.size()) == acc_over_turn.end())
            {
                acc_over_turn[vd_dialogues.size()] = make_tuple(0, 0, 0);
            }
            get<0>(acc_over_turn[vd_dialogues.size()]) += this_hit.first;
            get<1>(acc_over_turn[vd_dialogues.size()]) += this_hit.second;
            get<2>(acc_over_turn[vd_dialogues.size()]) += ndutt * vd_dialogues.size();
            id_sel_idx = get_same_length_dialogues(train_corpus, nparallel, id_stt_diag_id, vd_selected, vd_dialogues, train_corpusinfo);
            ndutt = id_sel_idx.size();
            lines += ndutt * vd_dialogues.size();
            csls = get_candidate_responses(vd_dialogues, negative_responses, rand_pos, max_negative_samples);
            if (verbose)
            {
                cerr << "selected " << ndutt << " : ";
                for (auto p : id_sel_idx)
                    cerr << p << " ";
                cerr << endl;
            }
            ilines_check_point += ndutt * vd_dialogues.size();
            if (ilines_check_point > 50000)
            {
                /// periodic checkpoint with running recall statistics
                save_cnn_model(model_out_fn + ".e" + boost::lexical_cast<string>(train_epoch) + ".ln" + boost::lexical_cast<string>(lines), &model);
                ilines_check_point = 0;
                for (auto iter = acc_over_turn.begin(); iter != acc_over_turn.end(); iter++)
                {
                    auto key = iter->first;
                    auto t = iter->second;
                    cerr << "turn len : " << key << ", " << get<2>(t) << " lines, R@1 " << get<0>(t) / (get<2>(t) +0.0) * 100 << "%., R@5 " << get<1>(t) / (get<2>(t) +0.0) * 100 << "%." << endl;
                    of << "turn len : " << key << ", " << get<2>(t) << " lines, R@1 " << get<0>(t) / (get<2>(t) +0.0) * 100 << "%., R@5 " << get<1>(t) / (get<2>(t) +0.0) * 100 << "%." << endl;
                }
                cerr << "epoch " << train_epoch << "\n***Test [lines =" << lines << " out of total dialogues " << train_corpus.size() << " lines ] 1 in" << (MAX_NUMBER_OF_CANDIDATES + 1) << " R@1 " << hits_top_1 / (lines + 0.0) *100.0 << "%." << " R@5 " << hits_top_5 / (lines + 0.0) *100.0 << "%." << endl;
                of << "epoch " << train_epoch << "\n***Test [lines =" << lines << " out of total dialogues " << train_corpus.size() << " lines ] 1 in" << (MAX_NUMBER_OF_CANDIDATES + 1) << " R@1 " << hits_top_1 / (lines + 0.0) *100.0 << "%." << " R@5 " << hits_top_5 / (lines + 0.0) *100.0 << "%." << endl;
            }
        }
        /// end-of-epoch recall report
        for (auto iter = acc_over_turn.begin(); iter != acc_over_turn.end(); iter++)
        {
            auto key = iter->first;
            auto t = iter->second;
            cerr << "turn len :" << key << ", " << get<2>(t) << "lines, R@1 " << get<0>(t) / (get<2>(t) +0.0) * 100 << "%., R@5 " << get<1>(t) / (get<2>(t) +0.0) * 100 << "%." << endl;
            of << "turn len :" << key << ", " << get<2>(t) << "lines, R@1 " << get<0>(t) / (get<2>(t) +0.0) * 100 << "%., R@5 " << get<1>(t) / (get<2>(t) +0.0) * 100 << "%." << endl;
        }
        cerr << "epoch " << train_epoch << "\n***Test [lines =" << lines << " out of total dialogues " << train_corpus.size() << " lines ] 1 in" << (MAX_NUMBER_OF_CANDIDATES + 1) << " R@1 " << hits_top_1 / (lines + 0.0) *100.0 << "%." << " R@5 " << hits_top_5 / (lines + 0.0) *100.0 << "%." << endl;
        of << "epoch " << train_epoch << "\n***Test [lines =" << lines << " out of total dialogues " << train_corpus.size() << " lines ] 1 in" << (MAX_NUMBER_OF_CANDIDATES + 1) << " R@1 " << hits_top_1 / (lines + 0.0) *100.0 << "%." << " R@5 " << hits_top_5 / (lines + 0.0) *100.0 << "%." << endl;
        sgd->update_epoch();
        save_cnn_model(model_out_fn, &model);
        cerr << "**SHUFFLE\n";
        /// NOTE(review): this shuffles the member training_numturn2did while dialogue
        /// selection uses the train_corpusinfo parameter — confirm callers pass the
        /// same object, otherwise this shuffle does not affect selection order.
        shuffle(training_numturn2did.vNumTurns.begin(), training_numturn2did.vNumTurns.end(), *rndeng);
        id_stt_diag_id = 0;
        vd_selected = vector<bool>(train_corpus.size(), false);
        /// shuffle dialogues with the same number of turns.
        /// BUGFIX: iterate by reference — the old `for (auto p : ...)` copied each map
        /// entry, so the shuffle mutated a temporary and had no effect. Also use
        /// shuffle(*rndeng) instead of the deprecated random_shuffle.
        for (auto& p : training_numturn2did.mapNumTurn2DialogId){
            shuffle(p.second.begin(), p.second.end(), *rndeng);
        }
        train_epoch++;
        id_sel_idx = get_same_length_dialogues(train_corpus, nparallel, id_stt_diag_id, vd_selected, vd_dialogues, train_corpusinfo);
        ndutt = id_sel_idx.size();
        lines = ndutt * vd_dialogues.size();
    }
    of.close();
}
/**
Maximum-likelihood training on whole dialogues, one dialogue at a time.
Each dialogue is split at a random turn index and trained segment by segment,
carrying the serialized context across segments.
@bcharlevel : true if character output; default false.
@nosplitdialogue : if true, train each dialogue in a single segment (no split).
*/
template <class AM_t>
void TrainProcess<AM_t>::train(Model &model, AM_t &am, Corpus &training, Corpus &devel,
Trainer &sgd, string out_file, int max_epochs, bool bcharlevel, bool nosplitdialogue)
{
    cnn::real best = std::numeric_limits<cnn::real>::max();
    unsigned report_every_i = 50;
    unsigned dev_every_i_reports = 1000;
    unsigned si = training.size(); /// number of dialogues in training
    boost::mt19937 rng; // NOTE(review): unused — the rng actually used below is the std::bind lambda over *rndeng; candidate for removal
    vector<unsigned> order(training.size());
    for (unsigned i = 0; i < order.size(); ++i) order[i] = i;
    bool first = true;
    int report = 0;
    unsigned lines = 0;
    save_cnn_model(out_file, &model);
    reset_smoothed_ppl(ppl_hist);
    int prv_epoch = -1;
    vector<bool> v_selected(training.size(), false); /// track if a dialogue is used
    size_t i_stt_diag_id = 0;
    while (sgd.epoch < max_epochs) {
        Timer iteration("completed in");
        training_set_scores->reset();
        dev_set_scores->reset();
        cnn::real dloss = 0;
        cnn::real dchars_s = 0;
        cnn::real dchars_t = 0;
        for (unsigned iter = 0; iter < report_every_i; ++iter) {
            if (si == training.size()) {
                si = 0;
                if (first) { first = false; }
                else { sgd.update_epoch(); }
            }
            if (si % order.size() == 0) {
                cerr << "**SHUFFLE\n";
                shuffle(order.begin(), order.end(), *rndeng);
                i_stt_diag_id = 0;
                v_selected = vector<bool>(training.size(), false);
            }
            // build graph for this instance
            auto& spair = training[order[si % order.size()]];
            if (verbose)
                cerr << "diag = " << order[si % order.size()] << endl;
            /// find portion to train: pick a random turn index at which to split the dialogue
            // see random number distributions
            auto rng = std::bind(std::uniform_int_distribution<int>(0, spair.size() - 1), *rndeng);
            int i_turn_to_train = rng();
            if (nosplitdialogue)
                i_turn_to_train = 99999; /// effectively: never split, train the whole dialogue in one segment
            vector<SentencePair> prv_turn;
            size_t turn_id = 0;
            size_t i_init_turn = 0;
            /// train on two segments of a dialogue: turns [i_init_turn, i_init_turn + i_turn_to_train],
            /// then the remainder; context is serialized between segments via assign_cxt/serialise_cxt
            do{
                ComputationGraph cg;
                if (i_init_turn > 0)
                    am.assign_cxt(cg, 1); /// restore the context serialized at the end of the previous segment
                for (size_t t = i_init_turn; t <= std::min(i_init_turn + i_turn_to_train, spair.size() - 1); t++)
                {
                    SentencePair turn = spair[t];
                    vector<SentencePair> i_turn(1, turn);
                    if (turn_id == 0)
                    {
                        am.build_graph(i_turn, cg);
                    }
                    else
                    {
                        am.build_graph(prv_turn, i_turn, cg);
                    }
                    turn_id++;
                    if (verbose)
                    {
                        display_value(am.s2txent, cg);
                        cnn::real tcxtent = as_scalar(cg.get_value(am.s2txent));
                        cerr << "xent = " << tcxtent << " nobs = " << am.twords << " PPL = " << exp(tcxtent / am.twords) << endl;
                    }
                    prv_turn = i_turn;
                    /// segment boundary (or dialogue end): accumulate stats, update, and start the next segment
                    if (t == i_init_turn + i_turn_to_train || (t == spair.size() - 1)){
                        dloss += as_scalar(cg.get_value(am.s2txent.i));
                        dchars_s += am.swords;
                        dchars_t += am.twords;
                        cg.backward();
                        sgd.update(am.twords);
                        am.serialise_cxt(cg);
                        i_init_turn = t + 1;
                        i_turn_to_train = spair.size() - i_init_turn; /// the second segment runs to the end of the dialogue
                        break;
                    }
                }
            } while (i_init_turn < spair.size());
            if (iter == report_every_i - 1)
                am.respond(spair, sd, bcharlevel); /// qualitative sample once per report window
            ++si;
            lines++;
        }
        sgd.status();
        cerr << "\n***Train [epoch=" << (lines / (cnn::real)training.size()) << "] E = " << (dloss / dchars_t) << " ppl=" << exp(dloss / dchars_t) << ' ';
        // show score on dev data?
        report++;
        if (floor(sgd.epoch) != prv_epoch || report % dev_every_i_reports == 0 || fmod(lines, (cnn::real)training.size()) == 0.0) {
            dev_set_scores->reset();
            {
                vector<bool> vd_selected(devel.size(), false); /// track if a dialogue is used
                size_t id_stt_diag_id = 0;
                PDialogue vd_dialogues; // dialogues are organized in each turn; in each turn, there are parallel data from all speakers
                vector<int> id_sel_idx = get_same_length_dialogues(devel, NBR_DEV_PARALLEL_UTTS, id_stt_diag_id, vd_selected, vd_dialogues, devel_numturn2did);
                size_t ndutt = id_sel_idx.size();
                if (verbose)
                {
                    cerr << "selected " << ndutt << " : ";
                    for (auto p : id_sel_idx)
                        cerr << p << " ";
                    cerr << endl;
                }
                while (ndutt > 0)
                {
                    nosegmental_forward_backward(model, am, vd_dialogues, ndutt, dev_set_scores, true);
                    id_sel_idx = get_same_length_dialogues(devel, NBR_DEV_PARALLEL_UTTS, id_stt_diag_id, vd_selected, vd_dialogues, devel_numturn2did);
                    ndutt = id_sel_idx.size();
                    if (verbose)
                    {
                        cerr << "selected " << ndutt << " : ";
                        for (auto p : id_sel_idx)
                            cerr << p << " ";
                        cerr << endl;
                    }
                }
            }
            dev_set_scores->compute_score();
            cnn::real ddloss = smoothed_ppl(dev_set_scores->dloss, ppl_hist);
            if (ddloss < best) {
                best = ddloss;
                save_cnn_model(out_file, &model);
            }
            else{
                sgd.eta0 *= 0.5; /// reduce learning rate
                sgd.eta *= 0.5; /// reduce learning rate
            }
            cerr << "\n***DEV [epoch=" << (lines / (cnn::real)training.size()) << "] E = " << (dev_set_scores->dloss / dev_set_scores->twords) << " ppl=" << exp(dev_set_scores->dloss / dev_set_scores->twords) << ' ';
        }
        prv_epoch = floor(sgd.epoch);
    }
}
/**
 Training on a tuple corpus: each dialogue turn is a SentenceTuple rather than a
 SentencePair. Runs until sgd.epoch reaches max_epochs; reports training cross
 entropy and perplexity every report_every_i dialogues, and saves the model to
 out_file whenever the smoothed PPL improves at an epoch boundary.

 @param model      parameter collection updated by the trainer
 @param am         dialogue model; builds one computation graph per turn
 @param training   tuple corpus to train on
 @param sgd        trainer; its epoch counter and learning rates are mutated here
 @param out_file   file name the cnn model is saved to
 @param max_epochs maximum number of passes over the data
*/
template <class AM_t>
void TrainProcess<AM_t>::train(Model &model, AM_t &am, TupleCorpus &training, Trainer &sgd, string out_file, int max_epochs)
{
    cnn::real best = 9e+99; /// best smoothed PPL observed so far
    unsigned report_every_i = 50;
    unsigned dev_every_i_reports = 1000; // NOTE(review): declared but unused in this overload
    unsigned si = training.size(); /// number of dialgoues in training
    boost::mt19937 rng; // produces randomness out of thin air
                        // NOTE(review): shadowed by the std::bind rng inside the loop; never used
    vector<unsigned> order(training.size());
    for (unsigned i = 0; i < order.size(); ++i) order[i] = i;
    bool first = true; // the very first pass does not count as a finished epoch
    int report = 0;    // NOTE(review): unused in this overload
    unsigned lines = 0; /// dialogues processed so far, across epochs
    int epoch = 0;      // NOTE(review): unused; sgd.epoch is the authoritative counter
    save_cnn_model(out_file, &model); /// save up front so out_file always exists
    reset_smoothed_ppl(ppl_hist);
    int prv_epoch = -1;
    vector<bool> v_selected(training.size(), false); /// track if a dialgoue is used
    size_t i_stt_diag_id = 0;
    while (sgd.epoch < max_epochs) {
        Timer iteration("completed in");
        cnn::real dloss = 0;    /// accumulated loss in this reporting window
        cnn::real dchars_s = 0; /// source-side word count
        cnn::real dchars_t = 0; /// target-side word count
        cnn::real dchars_tt = 0; // NOTE(review): unused
        for (unsigned iter = 0; iter < report_every_i; ++iter) {
            if (si == training.size()) {
                si = 0;
                if (first) { first = false; }
                else { sgd.update_epoch(); }
            }
            if (si % order.size() == 0) {
                // start of a pass: reshuffle presentation order and reset bookkeeping
                cerr << "**SHUFFLE\n";
                shuffle(order.begin(), order.end(), *rndeng);
                i_stt_diag_id = 0;
                v_selected = vector<bool>(training.size(), false);
            }
            // build graph for this instance
            auto& spair = training[order[si % order.size()]];
            if (verbose)
                cerr << "diag = " << order[si % order.size()] << endl;
            /// find portion to train
            bool b_trained = false; // NOTE(review): never written after init
            // see random number distributions
            auto rng = std::bind(std::uniform_int_distribution<int>(0, spair.size() - 1), *rndeng);
            int i_turn_to_train = rng(); // NOTE(review): draw advances *rndeng but the value is
                                         // unused — the loop below always covers the whole dialogue
            vector<SentenceTuple> prv_turn;
            size_t turn_id = 0;
            size_t i_init_turn = 0;
            /// train on two segments of a dialogue
            ComputationGraph cg; // one graph spans every turn of this dialogue
            size_t t = 0;
            do{
                if (i_init_turn > 0)
                    am.assign_cxt(cg, 1); // carry context across segments (never taken: i_init_turn stays 0)
                SentenceTuple turn = spair[t];
                vector<SentenceTuple> i_turn(1, turn);
                if (turn_id == 0)
                {
                    am.build_graph(i_turn, cg); // first turn: no history
                }
                else
                {
                    am.build_graph(prv_turn, i_turn, cg); // later turns condition on the previous turn
                }
                turn_id++;
                t++;
                prv_turn = i_turn;
            } while (t < spair.size());
            dloss += as_scalar(cg.get_value(am.s2txent.i)); // sentence-to-text cross entropy
            dchars_s += am.swords;
            dchars_t += am.twords;
            // CheckGrad(model, cg);
            cg.backward();
            sgd.update(am.twords);
            if (verbose)
                cerr << "\n***Train [epoch=" << (lines / (cnn::real)training.size()) << "] E = " << (dloss / dchars_t) << " ppl=" << exp(dloss / dchars_t) << ' ';
            ++si;
            lines++;
        }
        sgd.status();
        cerr << "\n***Train [epoch=" << (lines / (cnn::real)training.size()) << "] E = " << (dloss / dchars_t) << " ppl=" << exp(dloss / dchars_t) << ' ';
        if (fmod(lines, (cnn::real)training.size()) == 0)
        {
            // epoch boundary: keep best model, halve learning rate on no improvement
            cnn::real i_ppl = smoothed_ppl(exp(dloss / dchars_t), ppl_hist);
            if (best > i_ppl)
            {
                best = i_ppl;
                save_cnn_model(out_file, &model);
            }
            else
            {
                sgd.eta0 *= 0.5;
                sgd.eta *= 0.5;
            }
        }
        prv_epoch = floor(sgd.epoch);
    }
}
/**
 Collect sample responses from the training corpus.

 Feeds the response side (second element) of every sentence pair in every
 dialogue to the model's candidate pool, after clearing any previously
 collected candidates.

 @param am       model whose candidate pool is (re)built
 @param training corpus whose responses are collected
*/
template <class AM_t>
void TrainProcess<AM_t>::collect_sample_responses(AM_t& am, Corpus &training)
{
    am.clear_candidates();
    for (auto & ds : training)
    {
        // only the response side (spair.second) feeds the candidate pool;
        // the original made an unused copy of each pair, removed here
        for (auto& spair : ds)
            am.collect_candidates(spair.second);
    }
}
/**
 Lightly pre-train the model on a small subset of the data: every
 sample_step-th dialogue is used. Training stops when the smoothed training
 PPL drops below target_ppl, when sample_step*10 epochs have run, or when the
 learning rate decays below 1e-10. The model is saved to out_file (and
 out_file + ".pretrained") at the end regardless.

 NOTE(review): default arguments (`= false`) on an out-of-line definition are
 ill-formed if the declaration also provides them — confirm against the header.
 NOTE(review): parameter `devel` is accepted but never used here.

 @param model           parameter collection updated by the trainer
 @param am              dialogue model
 @param training        full training corpus (subsampled here)
 @param devel           dev corpus (unused)
 @param sgd             trainer; epoch counter and learning rates are mutated
 @param out_file        file name the cnn model is saved to
 @param target_ppl      stop once smoothed training PPL falls below this
 @param min_diag_id     offset added to every sampled dialogue index
 @param bcharlevel      true if character-level output (passed to am.respond)
 @param nosplitdialogue when true, train each dialogue as one unsplit segment
*/
template <class AM_t>
void TrainProcess<AM_t>::supervised_pretrain(Model &model, AM_t &am, Corpus &training, Corpus &devel,
    Trainer &sgd, string out_file, cnn::real target_ppl, int min_diag_id,
    bool bcharlevel = false, bool nosplitdialogue = false)
{
    cnn::real best = std::numeric_limits<cnn::real>::max();
    unsigned report_every_i = 50;
    unsigned si = training.size(); /// number of dialgoues in training
    boost::mt19937 rng; // produces randomness out of thin air
                        // NOTE(review): shadowed by the std::bind rng below; never used
    reset_smoothed_ppl(ppl_hist);
    size_t sample_step = 100; /// use every 100th dialogue only
    size_t maxepoch = sample_step * 10; /// no point of using more than 100 epochs, which correspond to use full data with 10 epochs for pre-train
    vector<unsigned> order(training.size() / sample_step);
    size_t k = 0;
    // order holds the subsampled dialogue indices: 0, sample_step, 2*sample_step, ...
    for (unsigned i = 0; i < training.size(); i += sample_step)
    {
        if (k < order.size())
            order[k++] = i;
        else
            break;
    }
    bool first = true; // the very first pass does not count as a finished epoch
    unsigned lines = 0; /// dialogues processed so far
    save_cnn_model(out_file, &model); /// save up front so out_file always exists
    int prv_epoch = -1;
    vector<bool> v_selected(training.size(), false); /// track if a dialgoue is used
    size_t i_stt_diag_id = 0;
    while (best > target_ppl && sgd.epoch < maxepoch) {
        Timer iteration("completed in");
        cnn::real dloss = 0;
        cnn::real dchars_s = 0; /// source-side word count
        cnn::real dchars_t = 0; /// target-side word count
        for (unsigned iter = 0; iter < report_every_i; ++iter) {
            if (si == training.size()) {
                si = 0;
                if (first) { first = false; }
                else { sgd.update_epoch(); }
            }
            if (si % order.size() == 0) {
                cerr << "**SHUFFLE\n";
                shuffle(order.begin(), order.end(), *rndeng);
                i_stt_diag_id = 0;
                v_selected = vector<bool>(order.size(), false);
            }
            // build graph for this instance
            auto& spair = training[order[si % order.size()] + min_diag_id];
            if (verbose)
                cerr << "diag = " << order[si % order.size()] + min_diag_id << endl;
            /// find portion to train
            // see random number distributions
            auto rng = std::bind(std::uniform_int_distribution<int>(0, spair.size() - 1), *rndeng);
            int i_turn_to_train = rng(); // random split point: length of the first segment
            if (nosplitdialogue)
                i_turn_to_train = 99999; // effectively "train the whole dialogue in one segment"
            vector<SentencePair> prv_turn;
            size_t turn_id = 0;
            size_t i_init_turn = 0;
            /// train on two segments of a dialogue
            do{
                ComputationGraph cg; // fresh graph per segment; context is serialised between segments
                if (i_init_turn > 0)
                    am.assign_cxt(cg, 1); // restore context saved by serialise_cxt below
                for (size_t t = i_init_turn; t <= std::min(i_init_turn + i_turn_to_train, spair.size() - 1); t++)
                {
                    SentencePair turn = spair[t];
                    vector<SentencePair> i_turn(1, turn);
                    if (turn_id == 0)
                    {
                        am.build_graph(i_turn, cg); // first turn of the dialogue: no history
                    }
                    else
                    {
                        am.build_graph(prv_turn, i_turn, cg);
                    }
                    turn_id++;
                    if (verbose)
                    {
                        display_value(am.s2txent, cg);
                        cnn::real tcxtent = as_scalar(cg.get_value(am.s2txent));
                        cerr << "xent = " << tcxtent << " nobs = " << am.twords << " PPL = " << exp(tcxtent / am.twords) << endl;
                    }
                    prv_turn = i_turn;
                    // end of segment (split point or last turn): update and roll over
                    if (t == i_init_turn + i_turn_to_train || (t == spair.size() - 1)){
                        dloss += as_scalar(cg.get_value(am.s2txent.i));
                        dchars_s += am.swords;
                        dchars_t += am.twords;
                        cg.backward();
                        sgd.update(am.twords);
                        am.serialise_cxt(cg); // save context so the next segment can resume
                        i_init_turn = t + 1;
                        i_turn_to_train = spair.size() - i_init_turn; // rest of dialogue is one segment
                        break;
                    }
                }
            } while (i_init_turn < spair.size());
            if (iter == report_every_i - 1)
                am.respond(spair, sd, bcharlevel); // show a sample response once per report window
            ++si;
            lines++;
        }
        sgd.status();
        cerr << "\n***Train [epoch=" << (lines / (cnn::real)order.size()) << "] E = " << (dloss / dchars_t) << " ppl=" << exp(dloss / dchars_t) << ' ';
        prv_epoch = floor(sgd.epoch);
        cnn::real i_ppl = smoothed_ppl(exp(dloss / dchars_t), ppl_hist);
        if (best > i_ppl)
        {
            best = i_ppl;
        }
        else
        {
            // no improvement: halve learning rate
            sgd.eta0 *= 0.5;
            sgd.eta *= 0.5;
        }
        if (sgd.eta < 1e-10)
        {
            cerr << "SGD stepsize is too small to update models" << endl;
            break;
        }
    }
    save_cnn_model(out_file, &model);
    save_cnn_model(out_file + ".pretrained", &model);
}
/**
 Online adaptation of an existing model, usually from a single dialogue
 (in contrast, offline adaptation needs a corpus).

 Repeats passes over the given dialogue until the training PPL falls below
 target_ppl, maxepoch epochs have run, or the learning rate decays below
 1e-10. Halves the learning rate whenever an epoch fails to improve PPL.

 @param model               parameter collection updated by the trainer
 @param am                  dialogue model
 @param training            single dialogue to adapt on
 @param sgd                 trainer; epoch counter and learning rates mutated
 @param target_ppl          stop once training PPL falls below this
 @param maxepoch            maximum number of epochs
 @param updated_model_fname if non-empty, the adapted model is saved here
*/
template <class AM_t>
void TrainProcess<AM_t>::online_adaptation(Model &model, AM_t &am,
    const Dialogue & training, // user_input_target_response_pair,
    Trainer &sgd, const cnn::real& target_ppl,
    int maxepoch,
    const string & updated_model_fname)
{
    cnn::real best = 9e+99;

    // wrap each turn into a one-wide parallel turn so the batch routines apply
    PDialogue pdialogue;
    for (const auto& turn : training)
    {
        PTurn parallel_turn(1);
        parallel_turn[0] = turn;
        pdialogue.push_back(parallel_turn);
    }

    while (best > target_ppl && sgd.epoch < maxepoch)
    {
        Timer iteration("completed in");
        training_set_scores->reset();
        segmental_forward_backward(model, am, pdialogue, 1, training_set_scores, false, false, &sgd);
        sgd.status();
        training_set_scores->compute_score();

        const cnn::real xent = training_set_scores->dloss / training_set_scores->twords;
        const cnn::real epoch_ppl = exp(xent);
        cerr << "\n***Train epoch[" << sgd.epoch << "] E = " << xent << " ppl=" << epoch_ppl << ' ';

        if (epoch_ppl < best)
            best = epoch_ppl;
        else
        {
            // no improvement this epoch: halve the learning rate
            sgd.eta0 *= 0.5;
            sgd.eta *= 0.5;
        }

        if (sgd.eta < 1e-10)
        {
            cerr << "SGD stepsize is too small to update models" << endl;
            break;
        }
        sgd.update_epoch();
    }

    if (!updated_model_fname.empty())
        save_cnn_model(updated_model_fname, &model);
}
/**
 Since the tool loads data into memory (which can exhaust it), this function
 samples/streams `epochsize` dialogues per chunk from disk and trains on each
 chunk in turn. When the reader runs dry it restarts the file, which marks the
 end of one full epoch: the model is checkpointed, sgd.epoch is advanced, and
 (if dev data exists) the dev PPL gates which model is kept as `out_file`.

 @param train_filename          training data file, read chunk by chunk
 @param model / am              parameters and dialogue model
 @param devel                   dev corpus used for model selection (may be empty)
 @param sgd                     trainer; epoch counter and learning rates mutated
 @param out_file                best-on-dev model path; per-epoch checkpoints get ".i<epoch>"
 @param max_epochs              number of full passes over the file
 @param nparallel               number of parallel dialogues per minibatch
 @param epochsize               number of dialogues loaded per chunk
 @param segmental_training      forwarded to batch_train
 @param do_gradient_check       forwarded to batch_train
 @param do_padding              forwarded to batch_train
 @param b_use_additional_feature forwarded to batch_train
*/
template <class AM_t>
void TrainProcess<AM_t>::split_data_batch_train(string train_filename, Model &model, AM_t &am, Corpus &devel,
    Trainer &sgd, string out_file,
    int max_epochs, int nparallel, int epochsize, bool segmental_training,
    bool do_gradient_check, bool do_padding, bool b_use_additional_feature)
{
    cnn::real largest_cost = std::numeric_limits<cnn::real>::max();
    cnn::real largest_dev_cost = std::numeric_limits<cnn::real>::max();
    reset_smoothed_ppl(ppl_hist);

    DataReader dr(train_filename);
    int trial = 0; // NOTE(review): counted but never read
    dr.read_corpus(sd, kSRC_SOS, kSRC_EOS, epochsize);
    Corpus training = dr.corpus();
    training_numturn2did = get_numturn2dialid(training);

    save_cnn_model(out_file, &model); /// save up front so out_file always exists
    while (sgd.epoch < max_epochs)
    {
        Timer this_epoch("this epoch completed in");

        // one pass over the current chunk (max_epochs=1, sgd_update_epochs=false)
        batch_train(model, am, training, devel, sgd, out_file, 1, nparallel, largest_cost, segmental_training, false, do_gradient_check, false, do_padding, kSRC_EOS, b_use_additional_feature);

        // fetch the next chunk
        dr.read_corpus(sd, kSRC_SOS, kSRC_EOS, epochsize);
        training = dr.corpus();
        training_numturn2did = get_numturn2dialid(training);

        if (training.size() == 0)
        {
            // end of file: restart reader -> one full epoch finished
            dr.restart();
            dr.read_corpus(sd, kSRC_SOS, kSRC_EOS, epochsize);
            training = dr.corpus(); /// copy the data from data thread to the data to be used in the main thread
            training_numturn2did = get_numturn2dialid(training);

//#define DEBUG
#ifndef DEBUG
            save_cnn_model(out_file + ".i" + boost::lexical_cast<string>(sgd.epoch), &model);
#endif

            sgd.update_epoch();

#ifndef DEBUG
            if (devel.size() > 0)
            {
                // evaluate on dev; keep the best model, otherwise halve the learning rate
                cnn::real ddloss, ddchars_s, ddchars_t;
                ddloss = testPPL(model, am, devel, devel_numturn2did, out_file + ".dev.log", segmental_training, ddchars_s, ddchars_t);
                ddloss = smoothed_ppl(ddloss, ppl_hist);

                if (ddloss < largest_dev_cost) {
                    /// save the model with the best performance on the dev set
                    largest_dev_cost = ddloss;
                    save_cnn_model(out_file, &model);
                }
                else{
                    sgd.eta0 *= 0.5; /// reduce learning rate
                    sgd.eta *= 0.5; /// reduce learning rate
                }
            }
#endif
        }
        trial++;
    }
}
/**
 REINFORCE-style counterpart of split_data_batch_train: streams `epochsize`
 dialogues per chunk and trains with REINFORCE_batch_train, using a frozen
 mirror agent (hred_agent_mirrow) for decoding/evaluation. A model snapshot is
 saved after every chunk; at each full pass of the file the dev PPL gates
 which model is kept as `model_file_name`, and the learning rate is halved on
 no improvement.

 @param train_filename    training data file, read chunk by chunk
 @param model             parameter collection
 @param hred              agent being trained
 @param hred_agent_mirrow frozen mirror of the agent used for evaluation
 @param devel             dev corpus for model selection (may be empty)
 @param sgd               trainer; epoch counter and learning rates mutated
 @param td                target dictionary, forwarded to REINFORCE_batch_train
 @param out_file          prefix for dev-log output
 @param model_file_name   path for saved models (".i<epoch>.d<ndiags>" snapshots)
 @param max_epochs        number of full passes over the file
 @param nparallel         parallel dialogues per minibatch
 @param epochsize         dialogues loaded per chunk
 @param largest_cost      in/out best training cost seen so far
 @param reward_baseline   REINFORCE reward baseline
 @param threshold_prob    sampling threshold, forwarded
 @param do_gradient_check forwarded
 @throws const char*      if the training file yields no data at all
*/
template <class AM_t>
void TrainProcess<AM_t>::split_data_batch_reinforce_train(string train_filename, Model &model,
    AM_t &hred , AM_t& hred_agent_mirrow,
    Corpus &devel,
    Trainer &sgd, Dict& td,
    string out_file, string model_file_name,
    int max_epochs, int nparallel, int epochsize,
    cnn::real & largest_cost, cnn::real reward_baseline, cnn::real threshold_prob,
    bool do_gradient_check)
{
    long total_diags = 0; /// total dialogues consumed; used to tag model snapshots
    cnn::real largest_dev_cost = std::numeric_limits<cnn::real>::max();
    reset_smoothed_ppl(ppl_hist);

    DataReader dr(train_filename);
    int trial = 0; // NOTE(review): counted but never read
    dr.read_corpus(sd, kSRC_SOS, kSRC_EOS, epochsize);
    Corpus training = dr.corpus();
    training_numturn2did = get_numturn2dialid(training);
    if (training.size() == 0)
    {
        cerr << "no content for " << train_filename << endl;
        throw("no content for training file");
    }

    while (sgd.epoch < max_epochs)
    {
        Timer this_epoch("this epoch completed in");

        // one REINFORCE pass over the current chunk
        REINFORCE_batch_train(model, hred, hred_agent_mirrow,
            training, devel, sgd, td, out_file, 1, nparallel, largest_cost, false, false, do_gradient_check, false,
            reward_baseline, threshold_prob);
        total_diags += training.size();

        // fetch the next chunk
        dr.read_corpus(sd, kSRC_SOS, kSRC_EOS, epochsize);
        training = dr.corpus();
        training_numturn2did = get_numturn2dialid(training);

        /// save models for every batch of data
        save_cnn_model(model_file_name + ".i" + boost::lexical_cast<string>(sgd.epoch) + ".d" + boost::lexical_cast<string>(total_diags), &model);

        if (training.size() == 0)
        {
            // end of file: restart reader -> one full epoch finished
            dr.restart();
            dr.read_corpus(sd, kSRC_SOS, kSRC_EOS, epochsize);
            training = dr.corpus(); /// copy the data from data thread to the data to be used in the main thread
            training_numturn2did = get_numturn2dialid(training);

            sgd.update_epoch();

#ifndef DEBUG
            if (devel.size() > 0)
            {
                // evaluate on dev; keep the best model, otherwise halve the learning rate
                cnn::real ddloss, ddchars_s, ddchars_t;
                ddloss = testPPL(model, hred, devel, devel_numturn2did, out_file + ".dev.log", false, ddchars_s, ddchars_t);
                ddloss = smoothed_ppl(ddloss, ppl_hist);

                if (ddloss < largest_dev_cost) {
                    /// save the model with the best performance on the dev set
                    largest_dev_cost = ddloss;
                    save_cnn_model(model_file_name, &model);
                }
                else{
                    sgd.eta0 *= 0.5; /// reduce learning rate
                    sgd.eta *= 0.5; /// reduce learning rate
                }
            }
#endif
        }
        trial++;
    }
}
/**
 Compute inverse document frequency (IDF) weights over the training corpus and
 build the TF-IDF scorer. Each user utterance and each response counts as one
 "document"; a word's document frequency counts the pairs that contain it at
 least once. Results land in mv_idf (indexed by word id) and ptr_tfidfScore.

 @param vm       program options (unused here, kept for interface parity)
 @param training corpus to derive statistics from
 @param sd       dictionary; its size fixes the length of mv_idf
*/
template <class AM_t>
void TrainProcess<AM_t>::get_idf(variables_map vm, const Corpus &training, Dict& sd)
{
    long total_terms = 0;     // total tokens seen (accumulated for parity; not used below)
    long total_documents = 0; // one per utterance: user and response each count
    tWordid2TfIdf doc_freq;   // word id -> number of documents containing it

    for (const auto & dialogue : training)
    {
        for (const auto & sp : dialogue)
        {
            const Sentence& user = sp.first;
            const Sentence& resp = sp.second;
            total_terms += user.size();
            total_terms += resp.size();
            total_documents += 2;

            // distinct word ids in this pair
            tWordid2TfIdf occurred;
            for (const auto& w : user)
                occurred[w] = 1;
            for (const auto& w : resp)
                occurred[w] = 1;
            for (const auto& kv : occurred)
                doc_freq[kv.first] += 1;
        }
    }

    mv_idf.resize(sd.size(), 0);
    for (const auto& kv : doc_freq)
    {
        cnn::real df = kv.second;
        mv_idf[kv.first] = log(total_documents / df); // idf = log(N / df)
    }

    // NOTE(review): overwrites any previous scorer without deleting it — confirm ownership
    ptr_tfidfScore = new TFIDFMetric(mv_idf, sd.size());
}
/**
 Train an LDA topic model on the training corpus, save its top topic words,
 reload the final model, and run it on the test corpus.

 @param vm       program options; "lda-model" names the output prefix
 @param training corpus to fit the LDA model on
 @param test     corpus evaluated after training
 @param sd       dictionary mapping word ids to strings
*/
template <class AM_t>
void TrainProcess<AM_t>::lda_train(variables_map vm, const Corpus &training, const Corpus& test, Dict& sd)
{
    ldaModel * pLda = new ldaModel(training, test);

    pLda->init(vm);
    pLda->read_data(training, sd, test);
    pLda->train();
    pLda->save_ldaModel_topWords(vm["lda-model"].as<string>() + ".topic.words", sd);

    pLda->load_ldaModel(-1); // -1 selects the final saved iteration
    pLda->test(sd);

    // BUG FIX: pLda was allocated with scalar new; the previous `delete[] pLda`
    // mismatched the allocation and was undefined behavior.
    delete pLda;
}
/**
 Load a previously trained LDA model and evaluate it on the test corpus.

 @param vm   program options; "lda-final-model" names the model to load
 @param test corpus to evaluate
 @param sd   dictionary mapping word ids to strings
*/
template <class AM_t>
void TrainProcess<AM_t>::lda_test(variables_map vm, const Corpus& test, Dict& sd)
{
    Corpus empty; // no training data in test-only mode
    ldaModel * pLda = new ldaModel(empty, test);

    pLda->init(vm);
    pLda->load_ldaModel(vm["lda-final-model"].as<string>());
    pLda->read_data(empty, sd, test);
    pLda->test(sd);

    // BUG FIX: pLda was allocated with scalar new; the previous `delete[] pLda`
    // mismatched the allocation and was undefined behavior.
    delete pLda;
}
/**
 Unconditional n-gram language-model sampling, scored with BLEU against a
 fixed reference sentence (a sanity check for the sampler).

 @param sos_sym / eos_sym sentence start/end symbols for the sampler
 @param vm                program options; "ngram_order" configures BLEU if set
 @param pnGram            trained n-gram model to sample from
 @param sd                dictionary used to convert ids to words
*/
template <class AM_t>
void TrainProcess<AM_t>::ngram_sampling(int sos_sym, int eos_sym, variables_map vm, nGram& pnGram, Dict& sd)
{
    std::vector<int> sampled_ids;
    std::vector<string> sampled_words;

    BleuMetric bleuScore;
    if (vm.count("ngram_order") == 0)
        bleuScore.Initialize();
    else
        bleuScore.Initialize(vm);

    pnGram.Sampling(sos_sym, eos_sym, sd, sampled_ids, sampled_words);

    // fixed reference sentence used to score the sample
    string reference = "hi , thanks for visiting answer desk ! i 'm xxpersonxx";
    vector<string> reference_tokens;
    boost::split(reference_tokens, reference, boost::algorithm::is_any_of(" "));

    bleuScore.AccumulateScore(reference_tokens, sampled_words);
    string sBleuScore = bleuScore.GetScore();

    cout << "BLEU (4) score = " << sBleuScore << endl;
}
/**
 Train an n-gram language model on the response side of the given corpus.
 Counts are accumulated in both modes 0 and 1 for every response; the model is
 then normalized and saved to its default location.

 NOTE(review): the semantics of the 0/1 mode argument are defined by
 nGram::UpdateNgramCounts — confirm against that class.

 @param vm   program options forwarded to nGram::Initialize
 @param test corpus whose responses (s.second) are counted
 @param sd   dictionary mapping word ids to strings
 @return the trained n-gram model (also persisted via SaveModel)
*/
template <class AM_t>
nGram TrainProcess<AM_t>::ngram_train(variables_map vm, const Corpus& test, Dict& sd)
{
    // removed an unused local `Corpus empty;` from the original
    nGram pnGram = nGram();
    pnGram.Initialize(vm);

    for (auto & t : test)
    {
        for (auto & s : t)
        {
            pnGram.UpdateNgramCounts(s.second, 0, sd);
            pnGram.UpdateNgramCounts(s.second, 1, sd);
        }
    }

    pnGram.ComputeNgramModel();
    pnGram.SaveModel();

    return pnGram;
}
/**
 Cluster responses using n-gram models, K-means style.

 Iteration 0 seeds ncls n-gram models from randomly shuffled responses (each
 cluster gets at least one sample); later iterations reassign every response
 to its most likely model, report the average log-likelihood, and re-estimate
 the models. Finally every (dialogue, turn) pair is written out with its
 cluster id and that cluster's representative response.

 @param vm   program options: "ngram-num-clusters", "interpolation_wgt",
             "epochs", optional "outputfile"
 @param test corpus whose responses are clustered
 @param sd   dictionary mapping word ids to strings
*/
template <class AM_t>
void TrainProcess<AM_t>::ngram_clustering(variables_map vm, const Corpus& test, Dict& sd)
{
    int ncls = vm["ngram-num-clusters"].as<int>();
    vector<nGram> pnGram(ncls);
    for (auto& p : pnGram)
        p.Initialize(vm);

    cnn::real interpolation_wgt = vm["interpolation_wgt"].as<cnn::real>();

    /// flatten corpus: order_kept_responses preserves corpus order, response is shuffled
    Sentences order_kept_responses, response;
    flatten_corpus(test, order_kept_responses, response);
    order_kept_responses = response;

    vector<long> ncnt(ncls, 0); /// per-cluster sample count; every class must have at least one sample
    int icnt = 0;               /// next cluster to force-seed
    for (int iter = 0; iter < vm["epochs"].as<int>(); iter++)
    {
        if (iter == 0)
        {
            // seeding pass: random assignment, but guarantee each cluster one sample
            shuffle(response.begin(), response.end(), std::default_random_engine(iter));
            for (int i = 0; i < ncls; i++)
            {
                pnGram[i].LoadModel(".m" + boost::lexical_cast<string>(i));
            }

            for (int i = 0; i < response.size(); i++)
            {
                /// every class has at least one sample
                int cls;
                if (icnt < ncls)
                {
                    if (ncnt[icnt] == 0)
                        cls = icnt++;
                    else
                        cls = rand0n_uniform(ncls - 1);
                }
                else
                    cls = rand0n_uniform(ncls - 1);
                pnGram[cls].UpdateNgramCounts(response[i], 0, sd);
                ncnt[cls] ++;
            }

#pragma omp parallel for
            for (int i = 0; i < ncls; i++)
            {
                pnGram[i].ComputeNgramModel();
                pnGram[i].SaveModel(".m" + boost::lexical_cast<string>(i));
            }
            std::fill(ncnt.begin(), ncnt.end(), 0);
        }
        else
        {
            /// reassign data to closest cluster
            vector<Sentences> current_assignment(ncls);
            double totallk = 0;
            for (int i = 0; i < order_kept_responses.size(); i++)
            {
                if (order_kept_responses[i].size() == 0)
                    continue;
                cnn::real largest;
                int iarg = closest_class_id(pnGram, 0, ncls, order_kept_responses[i], largest, interpolation_wgt);
                current_assignment[iarg].push_back(order_kept_responses[i]);
                totallk += largest / order_kept_responses[i].size(); // length-normalized
                ncnt[iarg]++;
            }
            totallk /= order_kept_responses.size();
            cout << "loglikelihood at iteration " << iter << " is " << totallk << endl;

            /// check if all clusters have at least one sample
            {
                int icls = 0;
                for (auto &p : ncnt)
                {
                    if (p < MIN_OCC_COUNT)
                    {
                        /// randomly pick one sample for this class.
                        // BUG FIX: was response[rand0n_uniform(size()) - 1], which
                        // yields index -1 (out of bounds) whenever the draw is 0.
                        current_assignment[icls].push_back(response[rand0n_uniform(order_kept_responses.size() - 1)]);
                    }
                    icls++;
                }
            }
            std::fill(ncnt.begin(), ncnt.end(), 0);

            /// update cluster models from the new assignment
#pragma omp parallel for
            for (int i = 0; i < ncls; i++)
                pnGram[i].Clear();
            for (int i = 0; i < current_assignment.size(); i++)
            {
                for (auto & p : current_assignment[i])
                {
                    pnGram[i].UpdateNgramCounts(p, 0, sd);
                    pnGram[i].UpdateNgramCounts(p, 1, sd);
                }
            }
#pragma omp parallel for
            for (int i = 0; i < ncls; i++)
            {
                pnGram[i].ComputeNgramModel();
                pnGram[i].SaveModel(".m" + boost::lexical_cast<string>(i));
            }
        }
    }

    // pick a representative response for each cluster
    vector<int> i_data_to_cls;
    vector<string> i_represenative;
    representative_presentation(pnGram, order_kept_responses, sd, i_data_to_cls, i_represenative, interpolation_wgt);

    /// do classification now: emit "did ||| tid ||| user ||| response ||| cls ||| representative"
    ofstream ofs;
    if (vm.count("outputfile") > 0)
        ofs.open(vm["outputfile"].as<string>());

    long did = 0;
    long idx = 0;
    for (auto& t : test)
    {
        int tid = 0;
        for (auto& s : t)
        {
            long iarg = i_data_to_cls[idx++];

            string userstr;
            for (auto& p : s.first)
                userstr = userstr + " " + sd.Convert(p);
            string responsestr;
            for (auto& p : s.second)
                responsestr = responsestr + " " + sd.Convert(p);

            string ostr = boost::lexical_cast<string>(did)+" ||| " + boost::lexical_cast<string>(tid)+" ||| " + userstr + " ||| " + responsestr;
            ostr = ostr + " ||| " + boost::lexical_cast<string>(iarg)+" ||| " + i_represenative[iarg];

            if (ofs.is_open())
            {
                ofs << ostr << endl;
            }
            else
                cout << ostr << endl;
            tid++;
        }
        did++;
    }

    if (ofs.is_open())
        ofs.close();
}
/**
 One-pass (online) n-gram clustering of responses, keeping at most
 "ngram-num-clusters" clusters. A response spawns a new cluster when its best
 log-likelihood falls below "llkthreshold" and capacity remains; otherwise it
 updates the closest cluster. A second pass labels every pair and emits
 "did ||| tid ||| user ||| response ||| cls ||| representative".

 @param vm   program options: "ngram-num-clusters", "llkthreshold",
             "interpolation_wgt", optional "outputfile"
 @param test corpus whose responses are clustered
 @param sd   dictionary mapping word ids to strings
*/
template <class AM_t>
void TrainProcess<AM_t>::ngram_one_pass_clustering(variables_map vm, const Corpus& test, Dict& sd)
{
#define MAXTURNS 100
    int ncls = vm["ngram-num-clusters"].as<int>();
    vector<int> data2cls;    /// cluster id per (dialogue, turn) in corpus order
    vector<nGram> pnGram;    /// cluster models, grown on demand

    cnn::real threshold = vm["llkthreshold"].as<cnn::real>();
    cnn::real interpolation_wgt = vm["interpolation_wgt"].as<cnn::real>();

    // pass 1: grow/update clusters online
    long sid = 0;
    for (auto& d : test)
    {
        for (auto &t : d)
        {
            Sentence rep = remove_first_and_last(t.second); // strip SOS/EOS
            cnn::real largest = LZERO;
            int iarg = -1; // BUG FIX: was uninitialized; -1 marks "no cluster yet"
            if (pnGram.size() > 0)
                iarg = closest_class_id(pnGram, 0, pnGram.size(), rep, largest, interpolation_wgt);

            // BUG FIX: guard on iarg < 0 so an empty model set always spawns a
            // cluster instead of indexing pnGram with an uninitialized id.
            if (iarg < 0 || (largest < threshold && pnGram.size() < ncls))
            {
                pnGram.push_back(nGram());
                pnGram.back().Initialize(vm);
                pnGram.back().UpdateNgramCounts(rep, 0, sd);
                pnGram.back().UpdateNgramCounts(rep, 1, sd);
                iarg = pnGram.size() - 1;
            }
            else{
                pnGram[iarg].UpdateNgramCounts(rep, 0, sd);
                pnGram[iarg].UpdateNgramCounts(rep, 1, sd);
            }

            /// update centroid periodically: always while growing, then every 1000 samples
            if ((pnGram.size() < ncls) || ((pnGram.size() > ncls - 1) && sid % 1000 == 0))
            {
                for (auto &p : pnGram){
                    p.ComputeNgramModel();
                }
            }
            sid++;
        }
    }

    /// pass 2: label every response and remember the best-scoring one per cluster
    vector<Sentence> typical_response(pnGram.size());
    vector<cnn::real> best_score(pnGram.size(), LZERO);

    ofstream ofs;
    if (vm.count("outputfile") > 0)
        ofs.open(vm["outputfile"].as<string>());

    for (auto& t : test)
    {
        for (auto& s : t)
        {
            cnn::real largest;
            Sentence rep = remove_first_and_last(s.second);
            long iarg = closest_class_id(pnGram, 0, pnGram.size(), rep, largest, interpolation_wgt);
            if (best_score[iarg] < largest)
            {
                best_score[iarg] = largest;
                typical_response[iarg] = s.second;
            }
            data2cls.push_back(iarg);
        }
    }

    // human-readable representative per cluster
    vector<string> i_representative;
    for (auto& p : typical_response)
    {
        string sl = "";
        for (auto w : p)
            sl = sl + sd.Convert(w) + " ";
        i_representative.push_back(sl);
    }

    // pass 3: emit the labeled corpus
    long dataid = 0;
    long did = 0;
    for (auto& t : test)
    {
        int tid = 0;
        for (auto& s : t)
        {
            string userstr;
            for (auto& p : s.first)
                userstr = userstr + " " + sd.Convert(p);
            string responsestr;
            for (auto& p : s.second)
                responsestr = responsestr + " " + sd.Convert(p);

            int iarg = data2cls[dataid];

            string ostr = boost::lexical_cast<string>(did)+" ||| " + boost::lexical_cast<string>(tid)+" ||| " + userstr + " ||| " + responsestr;
            ostr = ostr + " ||| " + boost::lexical_cast<string>(iarg)+" ||| " + i_representative[iarg];

            if (ofs.is_open())
            {
                ofs << ostr << endl;
            }
            else
                cout << ostr << endl;
            tid++;
            dataid++;
        }
        did++;
    }

    if (ofs.is_open())
        ofs.close();
}
/**
 Obtain the closest class id. Classes are organized linearly; for example with
 two large clusters of three subclasses each:
 [cls_01 cls_02 cls_03 cls_11 cls_12 cls_13]
 Scans the nclsInEachCluster models of cluster `this_cls` and returns the
 (base-0) index of the model with the largest sentence log-likelihood, offset
 by the base class position.

 @param pnGram            all cluster models, laid out linearly
 @param this_cls          index of the cluster whose subclasses are scanned
 @param nclsInEachCluster number of subclasses per cluster
 @param obs               observed sentence to score
 @param score             out: the winning log-likelihood
 @param interpolation_wgt interpolation weight passed to GetSentenceLL
 @return winning subclass index plus this_cls offset (ties keep the first)
*/
template<class AM_t>
int TrainProcess<AM_t>::closest_class_id(vector<nGram>& pnGram, int this_cls, int nclsInEachCluster, const Sentence& obs,
    cnn::real& score, cnn::real interpolation_wgt)
{
    const int base = this_cls * nclsInEachCluster;

    // running argmax over the subclass log-likelihoods (no temporary vector)
    int best_idx = 0;
    cnn::real best_llk = pnGram[base].GetSentenceLL(obs, interpolation_wgt);
    for (int c = 1; c < nclsInEachCluster; c++)
    {
        cnn::real cur = pnGram[c + base].GetSentenceLL(obs, interpolation_wgt);
        if (cur > best_llk)
        {
            best_llk = cur;
            best_idx = c;
        }
    }

    score = best_llk;
    return best_idx + this_cls;
}
/**
 Find the top representative of each class and assign a representative,
 together with its index, to the original input: every response is labeled
 with its closest cluster, and each cluster is represented by the response
 that scores highest under that cluster's model.

 NOTE(review): pnGram is taken by value, copying every model per call — the
 external declaration would need to change to pass by const reference.

 @param pnGram            cluster n-gram models (copied)
 @param responses         flattened responses in corpus order
 @param sd                dictionary mapping word ids to strings
 @param i_data_to_cls     out: cluster id per response, in input order
 @param i_representative  out: per cluster, the words of its best response
 @param interpolation_wgt interpolation weight for GetSentenceLL
 @throws const char*      if any cluster ends up with no representative
*/
template <class AM_t>
void TrainProcess<AM_t>::representative_presentation(
    vector<nGram> pnGram,
    const Sentences& responses,
    Dict& sd,
    vector<int>& i_data_to_cls,
    vector<string>& i_representative,
    cnn::real interpolation_wgt)
{
    long did = 0; // NOTE(review): unused
    int ncls = pnGram.size();

    vector<cnn::real> i_so_far_largest_score(ncls, -10000.0);/// the vector saving the largest score of a cluster from any observations so far
    vector<int> i_the_closet_input(ncls, -1); /// the index to the input that has the closest distance to centroid of each class

    i_representative.resize(ncls, "");

    for (auto& t : responses)
    {
        // label this response with its closest cluster
        cnn::real largest;
        int iarg = closest_class_id(pnGram, 0, ncls, t, largest, interpolation_wgt);
        i_data_to_cls.push_back(iarg);

        // separately, let this response compete as representative of EVERY cluster
        long icls = 0;
        for (auto& p : pnGram)
        {
            cnn::real lk = p.GetSentenceLL(t, interpolation_wgt);
            if (i_so_far_largest_score[icls] < lk)
            {
                i_so_far_largest_score[icls] = lk;
                i_the_closet_input[icls] = i_data_to_cls.size() - 1;
            }
            icls++;
        }
    }

    /// represent the cluster with closest observation
    int i_representations = 0;
    for (int i = 0; i < ncls; i++)
    {
        i_representative[i] = "";
        if (i_the_closet_input[i] >= 0)
        {
            for (auto& p : responses[i_the_closet_input[i]])
                i_representative[i] = i_representative[i] + " " + sd.Convert(p);
            i_representations++;
        }
        else{
            cout << "cluster" << i << " is empty" << endl;
            throw("cluster is empty");
        }
    }
    cout << "total " << i_representations << " representations " << endl;
}
/**
 Given trained models, do hierarchical n-gram clustering: each response
 already carries a coarse class id (s.second.second); this picks the best
 subclass within that class's block of nclsInEachCluster models, labels every
 pair, and emits "did ||| tid ||| user ||| response ||| cls ||| representative".
 Models are loaded from files ".m0", ".m1", ... in linear layout
 [cls0-sub0 .. cls0-subK, cls1-sub0 ..].

 @param vm   program options: "ngram-num-clusters", "ncls-in-each-cluster",
             "interpolation_wgt", optional "outputfile"
 @param test corpus with per-response class ids
 @param sd   dictionary mapping word ids to strings
*/
template <class AM_t>
void TrainProcess<AM_t>::hierarchical_ngram_clustering(variables_map vm, const CorpusWithClassId& test, Dict& sd)
{
    Corpus empty; // NOTE(review): unused
    int ncls = vm["ngram-num-clusters"].as<int>();
    int nclsInEachCluster = vm["ncls-in-each-cluster"].as<int>();
    vector<nGram> pnGram(ncls*nclsInEachCluster);
    long i = 0;
    // load every subclass model from its ".m<i>" file
    for (auto& p : pnGram)
    {
        p.Initialize(vm);
        p.LoadModel(".m" + boost::lexical_cast<string>(i));
        i++;
    }
    cnn::real interpolation_wgt = vm["interpolation_wgt"].as<cnn::real>();

    /// flatten corpus
    Sentences user_inputs;
    vector<SentenceWithId> response, not_randomly_shuffled_response;
    flatten_corpus(test, user_inputs, response);
    not_randomly_shuffled_response = response; /// backup of response that is not randomly shuffled
    user_inputs.clear();

    /// do classification now
    ofstream ofs;
    if (vm.count("outputfile") > 0)
        ofs.open(vm["outputfile"].as<string>());

    long did = 0;
    vector<cnn::real> i_so_far_largest_score(ncls * nclsInEachCluster, -10000.0);/// the vector saving the largest score of a cluster from any observations so far
    vector<int> i_the_closet_input(ncls * nclsInEachCluster, -1); /// the index to the input that has the closest distance to centroid of each class
    vector<int> i_data_to_cls;
    for (auto& t : test)
    {
        for (auto& s : t)
        {
            // score only the subclasses inside this response's coarse class block
            int this_cls = s.second.second;
            int cls_offset = this_cls * nclsInEachCluster;

            vector<cnn::real> llk(nclsInEachCluster);
            for (int i = 0; i < nclsInEachCluster; i++)
                llk[i] = pnGram[i + cls_offset].GetSentenceLL(s.second.first, interpolation_wgt);

            // argmax over the block (ties keep the first)
            cnn::real largest = llk[0];
            int iarg = 0;
            for (int i = 1; i < nclsInEachCluster; i++)
            {
                if (llk[i] > largest)
                {
                    largest = llk[i];
                    iarg = i;
                }
            }
            iarg += cls_offset; // absolute model index
            i_data_to_cls.push_back(iarg);

            /// update representation of this class
            if (i_so_far_largest_score[iarg] < largest)
            {
                i_so_far_largest_score[iarg] = largest;
                i_the_closet_input[iarg] = i_data_to_cls.size() - 1;
            }
        }
    }

    /// represent the cluster with closest observation
    vector<string> i_representative(ncls * nclsInEachCluster);
    for (int i = 0; i < ncls *nclsInEachCluster; i++)
    {
        i_representative[i] = "";
        if (i_the_closet_input[i] >= 0)
            for (auto& p : not_randomly_shuffled_response[i_the_closet_input[i]].first)
                i_representative[i] = i_representative[i] + " " + sd.Convert(p);
    }

    // emit the labeled corpus
    long idx = 0;
    for (auto& t : test)
    {
        int tid = 0;
        for (auto& s : t)
        {
            int iarg = i_data_to_cls[idx];

            string userstr;
            for (auto& p : s.first)
                userstr = userstr + " " + sd.Convert(p);
            string responsestr;
            for (auto& p : s.second.first)
                responsestr = responsestr + " " + sd.Convert(p);

            string ostr = boost::lexical_cast<string>(did)+" ||| " + boost::lexical_cast<string>(tid)+" ||| " + userstr + " ||| " + responsestr;
            ostr = ostr + " ||| " + boost::lexical_cast<string>(iarg)+" ||| " + i_representative[iarg];

            if (ofs.is_open())
            {
                ofs << ostr << endl;
            }
            else
                cout << ostr << endl;
            tid++;
            idx++;
        }
        did++;
    }

    if (ofs.is_open())
        ofs.close();
}
/**
 TrainProcess specialization for classification-style models: overrides the
 disk-streamed batch training entry points with a classification-specific
 corpus reader and training loop.
*/
template <class Proc>
class ClassificationTrainProcess : public TrainProcess<Proc>{
public:
    ClassificationTrainProcess(){
    }

    /// stream `epochsize` dialogues at a time from train_filename and train on each chunk
    void split_data_batch_train(string train_filename, Model &model, Proc &am, Corpus &devel, Trainer &sgd, string out_file, int max_epochs, int nparallel, cnn::real &best, bool segmental_training, bool do_gradient_check);

    /// train on an in-memory corpus; see the definition for parameter semantics
    void batch_train(Model &model, Proc &am, Corpus &training, Corpus &devel,
        Trainer &sgd, string out_file, int max_epochs, int nparallel, cnn::real &best, bool segmental_training,
        bool sgd_update_epochs, bool do_gradient_check, bool b_inside_logic);

public:
    // smoothed-perplexity history used for model selection
    // NOTE(review): likely shadows a same-named member of TrainProcess<Proc> — confirm
    vector<cnn::real> ppl_hist;
};
/**
 Since the tool loads data into memory (which can exhaust it), this function
 reads `epochsize` dialogues per chunk from the training file and trains on
 each chunk. Reaching end-of-file reopens the stream and counts as one epoch
 (checkpoint + sgd.update_epoch). A checkpoint is also taken every 50 chunks.

 NOTE(review): the declaration above names the best-cost parameter `best`
 while this definition uses a local `largest_cost` initialized to 9e+99 —
 if cnn::real is float, 9e+99 overflows to +inf; elsewhere this file uses
 std::numeric_limits<cnn::real>::max(). Confirm against the header.

 @param train_filename      training data file, read chunk by chunk
 @param model / am          parameters and classification model
 @param devel               dev corpus forwarded to batch_train
 @param sgd                 trainer; epoch counter mutated
 @param out_file            checkpoint prefix (".i<epoch>" suffixes)
 @param max_epochs          number of full passes over the file
 @param nparallel           parallel dialogues per minibatch
 @param epochsize           dialogues loaded per chunk
 @param segmental_training  forwarded to batch_train
 @param do_gradient_check   forwarded to batch_train
*/
template <class AM_t>
void ClassificationTrainProcess<AM_t>::split_data_batch_train(string train_filename, Model &model, AM_t &am, Corpus &devel,
    Trainer &sgd, string out_file,
    int max_epochs, int nparallel, int epochsize, bool segmental_training, bool do_gradient_check)
{
    // a mirrow of the agent to generate decoding results so that their results can be evaluated
    // this is not efficient implementation, better way is to share model parameters
    cnn::real largest_cost = 9e+99;

    ifstream ifs(train_filename);
    int trial = 0;
    while (sgd.epoch < max_epochs)
    {
        cerr << "Reading training data from " << train_filename << "...\n";
        // columns 2-4, keep first field, map ids through phyId2logicId
        Corpus training = read_corpus(ifs, sd, kSRC_SOS, kSRC_EOS, epochsize, make_pair<int, int>(2, 4), make_pair<bool, bool>(true, false),
            id2str.phyId2logicId);
        training_numturn2did = get_numturn2dialid(training);

        if (ifs.eof() || training.size() == 0)
        {
            // end of file: reopen and mark the epoch boundary
            ifs.close();
            ifs.open(train_filename);

            if (training.size() == 0)
            {
                continue; // nothing read this round; retry from the top of the file
            }
            save_cnn_model(out_file + ".i" + boost::lexical_cast<string>(sgd.epoch), &model);
            sgd.update_epoch();
        }

        // one pass over this chunk (max_epochs=1, sgd_update_epochs=false)
        batch_train(model, am, training, devel, sgd, out_file, 1, nparallel, largest_cost, segmental_training, false, do_gradient_check, false);

        if (fmod(trial, 50) == 0)
        {
            save_cnn_model(out_file + ".i" + boost::lexical_cast<string>(sgd.epoch), &model);
        }
        trial++;
    }
    ifs.close();
}
/*
 Train on an in-memory corpus with parallel (same-length) dialogue batches.

 @ b_inside_logic : use logic inside of batch to do evaluation on the dev set.
   if it is false, do dev set evaluation only if sgd.epoch changes
 @ sgd_update_epochs : when true, run multiple passes until sgd.epoch reaches
   max_epochs; when false, run exactly one pass over the data and return
 @ best : in/out, best (smoothed) dev loss; the model is saved to out_file
   whenever it improves, otherwise the learning rate is halved
*/
template <class AM_t>
void ClassificationTrainProcess<AM_t>::batch_train(Model &model, AM_t &am, Corpus &training, Corpus &devel,
    Trainer &sgd, string out_file, int max_epochs, int nparallel, cnn::real &best, bool segmental_training,
    bool sgd_update_epochs, bool doGradientCheck, bool b_inside_logic)
{
    unsigned report_every_i = 50;
    unsigned dev_every_i_reports = 1000;
    unsigned si = training.size(); /// number of dialgoues in training
    vector<unsigned> order(training.size());
    for (unsigned i = 0; i < order.size(); ++i) order[i] = i;

    bool first = true; // the very first pass does not count as a finished epoch
    int report = 0;
    unsigned lines = 0;
    // removed unused locals `epoch` and `dchars_tt` from the original

    reset_smoothed_ppl(ppl_hist);

    int prv_epoch = -1;
    vector<bool> v_selected(training.size(), false); /// track if a dialgoue is used
    size_t i_stt_diag_id = 0;

    /// if no update of sgd in this function, need to train with all data in one pass and then return
    if (sgd_update_epochs == false)
    {
        report_every_i = training.size();
        si = 0;
    }

    while ((sgd_update_epochs && sgd.epoch < max_epochs) ||  /// run multiple passes of data
        (!sgd_update_epochs && si < training.size()))        /// run one pass of the data
    {
        Timer iteration("completed in");
        cnn::real dloss = 0;
        cnn::real dchars_s = 0;
        cnn::real dchars_t = 0;

        PDialogue v_dialogues; // dialogues are orgnaized in each turn, in each turn, there are parallel data from all speakers

        for (unsigned iter = 0; iter < report_every_i;) {
            if (si == training.size()) {
                si = 0;
                if (first) { first = false; }
                else if (sgd_update_epochs){
                    sgd.update_epoch();
                    lines -= training.size();
                }
            }

            if (si % order.size() == 0) {
                cerr << "**SHUFFLE\n";
                /// shuffle number of turns
                shuffle(training_numturn2did.vNumTurns.begin(), training_numturn2did.vNumTurns.end(), *rndeng);
                i_stt_diag_id = 0;
                v_selected = vector<bool>(training.size(), false);
                // BUG FIX: was `for (auto p : ...)`, which shuffled a COPY of each
                // id list so the intended per-length dialogue shuffle never happened.
                for (auto& p : training_numturn2did.mapNumTurn2DialogId){
                    /// shuffle dailogues with the same number of turns
                    random_shuffle(p.second.begin(), p.second.end());
                }
            }

            // pull the next batch of same-length dialogues
            vector<int> i_sel_idx = get_same_length_dialogues(training, nparallel, i_stt_diag_id, v_selected, v_dialogues, training_numturn2did);
            size_t nutt = i_sel_idx.size();
            if (nutt == 0)
                break;

            if (verbose)
            {
                cerr << "selected " << nutt << " : ";
                for (auto p : i_sel_idx)
                    cerr << p << " ";
                cerr << endl;
            }

            if (segmental_training)
                segmental_forward_backward(model, am, v_dialogues, nutt, dloss, dchars_s, dchars_t, false, doGradientCheck, &sgd);
            else
                nosegmental_forward_backward(model, am, v_dialogues, nutt, dloss, dchars_s, dchars_t, true, 0, &sgd);

            si += nutt;
            lines += nutt;
            iter += nutt;
        }

        sgd.status();
        iteration.WordsPerSecond(dchars_t + dchars_s);
        cerr << "\n***Train " << (lines / (cnn::real)training.size()) * 100 << " %100 of epoch[" << sgd.epoch << "] E = " << (dloss / dchars_t) << " ppl=" << exp(dloss / dchars_t) << ' ';

        // generate sample responses for the last batch's first-turn inputs
        vector<SentencePair> vs;
        for (auto&p : v_dialogues)
            vs.push_back(p[0]);
        vector<SentencePair> vres;
        am.respond(vs, vres, sd, id2str);

        // show score on dev data?
        report++;
        if (b_inside_logic && devel.size() > 0 && (floor(sgd.epoch) != prv_epoch
            || (report % dev_every_i_reports == 0
            || fmod(lines, (cnn::real)training.size()) == 0.0))) {
            cnn::real ddloss = 0;
            cnn::real ddchars_s = 0;
            cnn::real ddchars_t = 0;
            {
                vector<bool> vd_selected(devel.size(), false);  /// track if a dialgoue is used
                size_t id_stt_diag_id = 0;
                PDialogue vd_dialogues;  // dialogues are orgnaized in each turn, in each turn, there are parallel data from all speakers
                vector<int> id_sel_idx = get_same_length_dialogues(devel, NBR_DEV_PARALLEL_UTTS, id_stt_diag_id, vd_selected, vd_dialogues, devel_numturn2did);
                size_t ndutt = id_sel_idx.size();

                if (verbose)
                {
                    cerr << "selected " << ndutt << " : ";
                    for (auto p : id_sel_idx)
                        cerr << p << " ";
                    cerr << endl;
                }

                while (ndutt > 0)
                {
                    if (segmental_training)
                        segmental_forward_backward(model, am, vd_dialogues, ndutt, ddloss, ddchars_s, ddchars_t, false);
                    else
                        nosegmental_forward_backward(model, am, vd_dialogues, ndutt, ddloss, ddchars_s, ddchars_t, true);

                    id_sel_idx = get_same_length_dialogues(devel, NBR_DEV_PARALLEL_UTTS, id_stt_diag_id, vd_selected, vd_dialogues, devel_numturn2did);
                    ndutt = id_sel_idx.size();

                    if (verbose)
                    {
                        cerr << "selected " << ndutt << " : ";
                        for (auto p : id_sel_idx)
                            cerr << p << " ";
                        cerr << endl;
                    }
                }
            }

            ddloss = smoothed_ppl(ddloss, ppl_hist);
            if (ddloss < best) {
                best = ddloss;
                save_cnn_model(out_file, &model);
            }
            else{
                sgd.eta0 *= 0.5; /// reduce learning rate
                sgd.eta *= 0.5; /// reduce learning rate
            }
            cerr << "\n***DEV [epoch=" << (lines / (cnn::real)training.size()) << "] E = " << (ddloss / ddchars_t) << " ppl=" << exp(ddloss / ddchars_t) << ' ';
        }

        prv_epoch = floor(sgd.epoch);

        if (sgd_update_epochs == false)
        {
            /// because there is no update on sgd epoch, this loop can run forever.
            /// so just run one iteration and quit
            break;
        }
        else{
            save_cnn_model(out_file + "e" + boost::lexical_cast<string>(sgd.epoch), &model);
        }
    }
}
#endif
|
omp-taskloop-nogroup.c | #include <omp.h>
#include <unistd.h>
#include <stdio.h>
#define THREADS 6
#define LEN 4
/* Test program: an OpenMP taskloop with the 'nogroup' clause followed by an
   explicit taskwait.  NOTE(review): every thread of the parallel region
   encounters the taskloop (there is no 'single'), so each thread generates
   its own set of LEN tasks — presumably intentional for this test. */
int main(void)
{
int j=0;
#pragma omp parallel num_threads(THREADS)
{
/* The taskloop iteration variable is implicitly privatized per the OpenMP
   spec, so the shared declaration of j above is not itself a race. */
#pragma omp taskloop nogroup
for (j=0; j<LEN; j++)
{
usleep(30);
}
/* 'nogroup' removes the implicit taskgroup around the taskloop, so task
   completion is enforced explicitly with this taskwait instead. */
#pragma omp taskwait
}
return 0;
}
|
VolumetricAdaptiveMaxPooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricAdaptiveMaxPooling.c"
#else
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
// 5d tensor B x D x T x H x W
/* Forward pass for one frame (one sample) of adaptive volumetric max
   pooling over a D x T x H x W input.
   For every output cell (d, ot, oh, ow) the corresponding adaptive input
   window is scanned; the maximum value is written to output_p and the
   (frame-local, contiguous) index of the argmax to ind_p.
   The input is addressed through its strides; output/indices are assumed
   densely packed (they are freshly resized by the caller). */
static void THNN_(VolumetricAdaptiveMaxPooling_updateOutput_frame)(
          real *input_p,
          real *output_p,
          THIndex_t *ind_p,
          int64_t sizeD,
          int64_t isizeT,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeT,
          int64_t osizeH,
          int64_t osizeW,
          int64_t istrideD,
          int64_t istrideT,
          int64_t istrideH,
          int64_t istrideW)
{
  int64_t d;
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    /* loop over output */
    int64_t ot, oh, ow;
    for (ot = 0; ot < osizeT; ot++)
    {
      int64_t istartT = START_IND(ot, osizeT, isizeT);
      int64_t iendT = END_IND(ot, osizeT, isizeT);
      int64_t kT = iendT - istartT;
      for (oh = 0; oh < osizeH; oh++)
      {
        int64_t istartH = START_IND(oh, osizeH, isizeH);
        int64_t iendH = END_IND(oh, osizeH, isizeH);
        int64_t kH = iendH - istartH;
        for (ow = 0; ow < osizeW; ow++)
        {
          int64_t istartW = START_IND(ow, osizeW, isizeW);
          int64_t iendW = END_IND(ow, osizeW, isizeW);
          int64_t kW = iendW - istartW;

          /* local pointers */
          real *ip = input_p + d*istrideD + istartT*istrideT + istartH*istrideH + istartW*istrideW;
          real *op = output_p + d*osizeT*osizeH*osizeW + ot*osizeH*osizeW + oh*osizeW + ow;
          THIndex_t *indp = ind_p + d*osizeT*osizeH*osizeW + ot*osizeH*osizeW + oh*osizeW + ow;

          /* compute local max.
             Initialize from the first element of the (always non-empty)
             window instead of -FLT_MAX: -FLT_MAX is not the minimum of
             'double', so a double input below -FLT_MAX would otherwise
             never be selected and maxindex would stay -1, producing an
             out-of-bounds write in the backward pass.  This also
             propagates NaN instead of emitting a bogus index. */
          int64_t maxindex = istartT*isizeH*isizeW + istartH*isizeW + istartW;
          real maxval = *ip;
          int64_t it, ih, iw;
          for (it = 0; it < kT; it++)
          {
            for (ih = 0; ih < kH; ih++)
            {
              for (iw = 0; iw < kW; iw++)
              {
                real val = *(ip + it*istrideT + ih*istrideH + iw*istrideW);
                if (val > maxval)
                {
                  maxval = val;
                  /* frame-local contiguous index into the input volume */
                  maxindex = (it+istartT)*isizeH*isizeW + (ih+istartH)*isizeW + (iw+istartW);
                }
              }
            }
          }

          /* set output to local max */
          *op = maxval;
          /* store location of max */
          *indp = maxindex + TH_INDEX_BASE;
        }
      }
    }
  }
}
/* Adaptive volumetric max pooling, forward entry point.
   Accepts a 4D (D x T x H x W) or 5D (B x D x T x H x W) input tensor,
   resizes output and indices accordingly, and dispatches the per-frame
   kernel (parallelized over the batch dimension in the 5D case).
   Note the output-size argument order: osizeT, then osizeW, then osizeH. */
void THNN_(VolumetricAdaptiveMaxPooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THIndexTensor *indices,
          int osizeT,
          int osizeW,
          int osizeH)
{
  /* dimension indices for the non-batched (4D) layout; shifted by one below
     when a batch dimension is present */
  int dimD = 0;
  int dimT = 1;
  int dimH = 2;
  int dimW = 3;
  int64_t sizeB = 1;
  int64_t sizeD;
  int64_t isizeT;
  int64_t isizeH;
  int64_t isizeW;
  /* istrideB is assigned (and used) only in the 5D batch path */
  int64_t istrideB;
  int64_t istrideD;
  int64_t istrideT;
  int64_t istrideH;
  int64_t istrideW;
  real *input_data;
  real *output_data;
  THIndex_t *indices_data;

  THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
    "4D or 5D (batch mode) tensor expected for input, but got: %s");

  if (input->nDimension == 5)
  {
    istrideB = input->stride[0];
    sizeB = input->size[0];
    dimD++;
    dimT++;
    dimH++;
    dimW++;
  }
  /* sizes */
  sizeD = input->size[dimD];
  isizeT = input->size[dimT];
  isizeH = input->size[dimH];
  isizeW = input->size[dimW];
  /* strides: input is walked via its strides, so it need not be contiguous;
     output/indices are freshly resized below and addressed densely */
  istrideD = input->stride[dimD];
  istrideT = input->stride[dimT];
  istrideH = input->stride[dimH];
  istrideW = input->stride[dimW];

  /* resize output */
  if (input->nDimension == 4)
  {
    THTensor_(resize4d)(output, sizeD, osizeT, osizeH, osizeW);
    /* indices will contain max input locations for each output point */
    THIndexTensor_(resize4d)(indices, sizeD, osizeT, osizeH, osizeW);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    THNN_(VolumetricAdaptiveMaxPooling_updateOutput_frame)(input_data, output_data,
                                                           indices_data,
                                                           sizeD,
                                                           isizeT, isizeH, isizeW,
                                                           osizeT, osizeH, osizeW,
                                                           istrideD, istrideT,
                                                           istrideH, istrideW);
  }
  else
  {
    int64_t b;

    THTensor_(resize5d)(output, sizeB, sizeD, osizeT, osizeH, osizeW);
    /* indices will contain max input locations for each output point */
    THIndexTensor_(resize5d)(indices, sizeB, sizeD, osizeT, osizeH, osizeW);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    /* one frame per batch element; frames are independent */
#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(VolumetricAdaptiveMaxPooling_updateOutput_frame)(input_data+b*istrideB, output_data+b*sizeD*osizeT*osizeH*osizeW,
                                                             indices_data+b*sizeD*osizeT*osizeH*osizeW,
                                                             sizeD,
                                                             isizeT, isizeH, isizeW,
                                                             osizeT, osizeH, osizeW,
                                                             istrideD, istrideT,
                                                             istrideH, istrideW);
    }
  }
}
/* Backward pass for one frame: accumulates each gradOutput element into
   the gradInput position recorded by the forward pass in ind_p.
   gradOutput and the index tensor are densely packed per channel, so the
   three spatial output loops collapse into a single linear scan. */
static void THNN_(VolumetricAdaptiveMaxPooling_updateGradInput_frame)(
          real *gradInput_p,
          real *gradOutput_p,
          THIndex_t *ind_p,
          int64_t sizeD,
          int64_t isizeT,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeT,
          int64_t osizeH,
          int64_t osizeW)
{
  int64_t chan;
#pragma omp parallel for private(chan)
  for (chan = 0; chan < sizeD; chan++)
  {
    /* per-channel base pointers */
    real *gin = gradInput_p + chan*isizeT*isizeH*isizeW;
    real *gout = gradOutput_p + chan*osizeT*osizeH*osizeW;
    THIndex_t *ind = ind_p + chan*osizeT*osizeH*osizeW;
    const int64_t n_out = osizeT*osizeH*osizeW;
    int64_t k;
    for (k = 0; k < n_out; k++)
    {
      /* position of the max that produced output element k */
      int64_t maxp = ind[k] - TH_INDEX_BASE;
      /* route the gradient back to that input position */
      gin[maxp] += gout[k];
    }
  }
}
/* Adaptive volumetric max pooling, backward entry point.
   Scatters gradOutput back into gradInput at the argmax positions saved in
   'indices' by the forward pass.  Handles 4D and 5D (batched) inputs. */
void THNN_(VolumetricAdaptiveMaxPooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THIndexTensor *indices)
{
  /* dimension indices for the non-batched (4D) layout; shifted by one below
     when a batch dimension is present */
  int dimD = 0;
  int dimT = 1;
  int dimH = 2;
  int dimW = 3;
  int64_t sizeB = 1;
  int64_t sizeD;
  int64_t isizeT;
  int64_t isizeH;
  int64_t isizeW;
  int64_t osizeT;
  int64_t osizeH;
  int64_t osizeW;
  real *gradInput_data;
  real *gradOutput_data;
  THIndex_t *indices_data;

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize; zero because gradients are accumulated (+=) below */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->nDimension == 5) {
    sizeB = input->size[0];
    dimD++;
    dimT++;
    dimH++;
    dimW++;
  }
  /* sizes (output spatial extents are taken from gradOutput) */
  sizeD = input->size[dimD];
  isizeT = input->size[dimT];
  isizeH = input->size[dimH];
  isizeW = input->size[dimW];
  osizeT = gradOutput->size[dimT];
  osizeH = gradOutput->size[dimH];
  osizeW = gradOutput->size[dimW];

  /* get raw pointers.
     NOTE(review): gradInput and indices are addressed with dense offsets
     below — presumably resizeAs yields a contiguous gradInput and the
     forward pass produced contiguous indices; confirm upstream. */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);
  indices_data = THIndexTensor_(data)(indices);

  /* backprop */
  if (input->nDimension == 4)
  {
    THNN_(VolumetricAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                              indices_data,
                                                              sizeD,
                                                              isizeT, isizeH, isizeW,
                                                              osizeT, osizeH, osizeW);
  }
  else
  {
    int64_t b;
    /* batch elements are independent */
#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(VolumetricAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data+b*sizeD*isizeT*isizeH*isizeW, gradOutput_data+b*sizeD*osizeT*osizeH*osizeW,
                                                                indices_data+b*sizeD*osizeT*osizeH*osizeW,
                                                                sizeD,
                                                                isizeT, isizeH, isizeW,
                                                                osizeT, osizeH, osizeW);
    }
  }

  /* cleanup: release the contiguous copy made above */
  THTensor_(free)(gradOutput);
}
#endif
|
cov_fcts.h | /*!
* This file is part of GPBoost a C++ library for combining
* boosting with Gaussian process and mixed effects models
*
* Copyright (c) 2020 Fabio Sigrist. All rights reserved.
*
* Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
*/
#ifndef GPB_COV_FUNCTIONS_
#define GPB_COV_FUNCTIONS_
#include <GPBoost/type_defs.h>
#include <string>
#include <set>
#include <string>
#include <vector>
#include <cmath>
#include <LightGBM/utils/log.h>
using LightGBM::Log;
namespace GPBoost {
template<typename T_mat>
class RECompGP;
/*!
* \brief This class implements the covariance functions used for the Gaussian proceses
*
* Some details:
* 1. The template parameter <T_mat> can be either <den_mat_t> or <sp_mat_t>
*/
class CovFunction {
public:
/*! \brief Constructor */
CovFunction();
/*!
* \brief Constructor
* \param cov_fct_type Type of covariance function. We follow the notation and parametrization of Diggle and Ribeiro (2007) except for the Matern covariance where we follow Rassmusen and Williams (2006)
* \param shape Shape parameter of covariance function (=smoothness parameter for Matern and Wendland covariance. For the Wendland covariance function, we follow the notation of Bevilacqua et al. (2018)). This parameter is irrelevant for some covariance functions such as the exponential or Gaussian.
* \param taper_range Range parameter of Wendland covariance function / taper. We follow the notation of Bevilacqua et al. (2018)
* \param taper_mu Parameter \mu of Wendland covariance function / taper. We follow the notation of Bevilacqua et al. (2018)
*/
CovFunction(string_t cov_fct_type,
double shape = 0.,
double taper_range = 1.,
double taper_mu = 2.) {
num_cov_par_ = 2;
if (SUPPORTED_COV_TYPES_.find(cov_fct_type) == SUPPORTED_COV_TYPES_.end()) {
Log::REFatal("Covariance of type '%s' is not supported.", cov_fct_type.c_str());
}
if (cov_fct_type == "matern") {
if (!(AreSame(shape, 0.5) || AreSame(shape, 1.5) || AreSame(shape, 2.5))) {
Log::REFatal("Only shape / smoothness parameters 0.5, 1.5, and 2.5 supported for the Matern covariance function");
}
}
else if (cov_fct_type == "powered_exponential") {
if (shape <= 0. || shape > 2.) {
Log::REFatal("Shape needs to be larger than 0 and smaller or equal than 2 for the 'powered_exponential' covariance function");
}
}
else if (cov_fct_type == "wendland") {
if (!(AreSame(shape, 0.0) || AreSame(shape, 1.0) || AreSame(shape, 2.0))) {
Log::REFatal("Only shape / smoothness parameters 0, 1, and 2 supported for the Wendland covariance function");
}
CHECK(taper_range > 0.);
CHECK(taper_mu >= 1.);
taper_range_ = taper_range;
taper_mu_ = taper_mu;
num_cov_par_ = 1;
}
else if (cov_fct_type == "exponential_tapered") {
if (!(AreSame(shape, 0.0) || AreSame(shape, 1.0) || AreSame(shape, 2.0))) {
Log::REFatal("Only shape / smoothness parameters 0, 1, and 2 supported for the Wendland-tapered exponential covariance function");
}
CHECK(taper_range > 0.);
CHECK(taper_mu >= 1.);
taper_range_ = taper_range;
taper_mu_ = taper_mu;
num_cov_par_ = 2;
}
cov_fct_type_ = cov_fct_type;
shape_ = shape;
}
/*! \brief Destructor */
~CovFunction() {
}
/*!
* \brief Transform the covariance parameters
* \param sigma2 Marginal variance
* \param pars Vector with covariance parameters on orignal scale
* \param[out] pars_trans Transformed covariance parameters
*/
void TransformCovPars(const double sigma2, const vec_t& pars, vec_t& pars_trans) const {
pars_trans = pars;
pars_trans[0] = pars[0] / sigma2;
if (cov_fct_type_ == "exponential" ||
(cov_fct_type_ == "matern" && AreSame(shape_, 0.5)) ||
cov_fct_type_ == "exponential_tapered") {
pars_trans[1] = 1. / pars[1];
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 1.5)) {
pars_trans[1] = sqrt(3.) / pars[1];
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 2.5)) {
pars_trans[1] = sqrt(5.) / pars[1];
}
else if (cov_fct_type_ == "gaussian") {
pars_trans[1] = 1. / (pars[1] * pars[1]);
}
else if (cov_fct_type_ == "powered_exponential") {
pars_trans[1] = 1. / (std::pow(pars[1], shape_));
}
}
/*!
* \brief Function transforms the covariance parameters back to the original scale
* \param sigma2 Marginal variance
* \param pars Vector with covariance parameters
* \param[out] pars_orig Back-transformed, original covariance parameters
*/
void TransformBackCovPars(const double sigma2, const vec_t& pars, vec_t& pars_orig) const {
pars_orig = pars;
pars_orig[0] = sigma2 * pars[0];
if (cov_fct_type_ == "exponential" ||
(cov_fct_type_ == "matern" && AreSame(shape_, 0.5)) ||
cov_fct_type_ == "exponential_tapered") {
pars_orig[1] = 1. / pars[1];
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 1.5)) {
pars_orig[1] = sqrt(3.) / pars[1];
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 2.5)) {
pars_orig[1] = sqrt(5.) / pars[1];
}
else if (cov_fct_type_ == "gaussian") {
pars_orig[1] = 1. / std::sqrt(pars[1]);
}
else if (cov_fct_type_ == "powered_exponential") {
pars_orig[1] = 1. / (std::pow(pars[1], 1. / shape_));
}
}
//TODO: For the training data, this can be done faster as sigma is symetric: add parameter bool dist_is_symetric and do calculations only for upper half?
/*!
* \brief Calculates covariance matrix
* Note: this is the version for dense matrixes
* \param dist Distance matrix
* \param pars Vector with covariance parameters
* \param[out] sigma Covariance matrix
*/
void GetCovMat(const den_mat_t& dist,
const vec_t& pars,
den_mat_t& sigma) const {
CHECK(pars.size() == num_cov_par_);
if (cov_fct_type_ == "exponential" || (cov_fct_type_ == "matern" && AreSame(shape_, 0.5))) {
//den_mat_t sigma(dist.rows(),dist.cols());//TODO: this is not working, check whether this can be done using triangularView? If it works, make dist_ (see re_comp.h) an upper triangular matrix as lower part is not used
//sigma.triangularView<Eigen::Upper>() = (pars[1] * ((-pars[2] * dist.triangularView<Eigen::Upper>().array()).exp())).matrix();
//sigma.triangularView<Eigen::StrictlyLower>() = sigma.triangularView<Eigen::StrictlyUpper>().transpose();
sigma = (pars[0] * ((-pars[1] * dist.array()).exp())).matrix();
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 1.5)) {
sigma = (pars[0] * (1. + pars[1] * dist.array()) * ((-pars[1] * dist.array()).exp())).matrix();
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 2.5)) {
sigma = (pars[0] * (1. + pars[1] * dist.array() + pars[1] * pars[1] * dist.array().square() / 3.) * ((-pars[1] * dist.array()).exp())).matrix();
}
else if (cov_fct_type_ == "gaussian") {
sigma = (pars[0] * ((-pars[1] * dist.array().square()).exp())).matrix();
}
else if (cov_fct_type_ == "powered_exponential") {
sigma = (pars[0] * ((-pars[1] * dist.array().pow(shape_)).exp())).matrix();
}
else if (cov_fct_type_ == "wendland") {
sigma = den_mat_t(dist.rows(), dist.cols());
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)dist.rows(); ++i) {
for (int j = 0; j < (int)dist.cols(); ++j) {
if (dist(i, j) >= taper_range_) {
sigma(i, j) = 0.;
}
else {
sigma(i, j) = pars[0] * WendlandCorrelation(dist(i, j));
}
}
}
}
else if (cov_fct_type_ == "exponential_tapered") {
sigma = den_mat_t(dist.rows(), dist.cols());
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)dist.rows(); ++i) {
for (int j = 0; j < (int)dist.cols(); ++j) {
if (dist(i, j) >= taper_range_) {
sigma(i, j) = 0.;
}
else {
sigma(i, j) = pars[0] * WendlandCorrelation(dist(i, j)) * std::exp(-pars[1] * dist(i, j));
}
}
}
}
else {
Log::REFatal("Covariance of type '%s' is not supported.", cov_fct_type_.c_str());
}
}
/*!
* \brief Calculates covariance matrix
* Note: this is the version for sparse matrixes
* \param dist Distance matrix
* \param pars Vector with covariance parameters
* \param[out] sigma Covariance matrix
*/
void GetCovMat(const sp_mat_t& dist,
const vec_t& pars,
sp_mat_t& sigma) const {
CHECK(pars.size() == num_cov_par_);
sigma = dist;
if (cov_fct_type_ == "exponential" || (cov_fct_type_ == "matern" && AreSame(shape_, 0.5))) {
sigma.coeffs() = pars[0] * ((-pars[1] * dist.coeffs()).exp());
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 1.5)) {
sigma.coeffs() = pars[0] * (1. + pars[1] * dist.coeffs()) * ((-pars[1] * dist.coeffs()).exp());
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 2.5)) {
sigma.coeffs() = pars[0] * (1. + pars[1] * dist.coeffs() +
pars[1] * pars[1] * dist.coeffs().square() / 3.) * ((-pars[1] * dist.coeffs()).exp());
}
else if (cov_fct_type_ == "gaussian") {
sigma.coeffs() = pars[0] * ((-pars[1] * dist.coeffs().square()).exp());
}
else if (cov_fct_type_ == "powered_exponential") {
sigma.coeffs() = pars[0] * ((-pars[1] * dist.coeffs().pow(shape_)).exp());
}
else if (cov_fct_type_ == "wendland") {
sigma.coeffs() = pars[0];
AddWendlandCorrelation(sigma, dist);
}
else if (cov_fct_type_ == "exponential_tapered") {
sigma.coeffs() = pars[0] * ((-pars[1] * dist.coeffs()).exp());
AddWendlandCorrelation(sigma, dist);
}
//else if (cov_fct_type_ == "wendland" && AreSame(shape_, 0.)) {
// sigma.coeffs() = pars[0] * (1. - sigma.coeffs() / taper_range_).pow(taper_mu_);
//}
//else if (cov_fct_type_ == "wendland" && AreSame(shape_, 1.)) {
// sigma.coeffs() = pars[0] * (1. - sigma.coeffs() / taper_range_).pow(taper_mu_ + 1.) *
// (1. + sigma.coeffs() / taper_range_ * (taper_mu_ + 1.));
//}
//else if (cov_fct_type_ == "wendland" && AreSame(shape_, 2.)) {
// sigma.coeffs() = pars[0] * (1. - sigma.coeffs() / taper_range_).pow(taper_mu_ + 2.) *
// (1. + sigma.coeffs() / taper_range_ * (taper_mu_ + 2.) + (sigma.coeffs() / taper_range_).pow(2) * (taper_mu_ * taper_mu_ + 4 * taper_mu_ + 3.) / 3.);
//}
//else if (cov_fct_type_ == "exponential_tapered" && AreSame(shape_, 0.)) {
// sigma.coeffs() = pars[0] * (1. - sigma.coeffs() / taper_range_).pow(taper_mu_) * ((-pars[1] * sigma.coeffs()).exp());
//}
//else if (cov_fct_type_ == "exponential_tapered" && AreSame(shape_, 1.)) {
// sigma.coeffs() = pars[0] * (1. - sigma.coeffs() / taper_range_).pow(taper_mu_ + 1.) *
// (1. + sigma.coeffs() / taper_range_ * (taper_mu_ + 1.)) *
// ((-pars[1] * sigma.coeffs()).exp());
//}
//else if (cov_fct_type_ == "exponential_tapered" && AreSame(shape_, 2.)) {
// sigma.coeffs() = pars[0] * (1. - sigma.coeffs() / taper_range_).pow(taper_mu_ + 2.) *
// (1. + sigma.coeffs() / taper_range_ * (taper_mu_ + 2.) + (sigma.coeffs() / taper_range_).pow(2) * (taper_mu_ * taper_mu_ + 4 * taper_mu_ + 3.) / 3.) *
// ((-pars[1] * sigma.coeffs()).exp());
//}
else {
Log::REFatal("Covariance of type '%s' is not supported.", cov_fct_type_.c_str());
}
}
/*!
* \brief Calculates derivatives of the covariance matrix with respect to the inverse range parameter
* Note: this is the version for dense matrixes
* \param dist Distance matrix
* \param sigma Covariance matrix
* \param pars Vector with covariance parameters
* \param[out] sigma_grad Derivative of covariance matrix with respect to the inverse range parameter
* \param transf_scale If true, the derivative is taken on the transformed scale otherwise with respect to the original range parameter (the parameters values pars are always given on the transformed scale). Optimiziation is done using transf_scale=true. transf_scale=false is needed, for instance, for calcualting the Fisher information on the original scale.
* \param marg_var Marginal variance parameters sigma^2 (used only if transf_scale = false to transform back)
*/
void GetCovMatGradRange(const den_mat_t& dist,
const den_mat_t& sigma,
const vec_t& pars,
den_mat_t& sigma_grad,
bool transf_scale,
double marg_var) const {
CHECK(pars.size() == num_cov_par_);
if (cov_fct_type_ == "exponential" ||
(cov_fct_type_ == "matern" && AreSame(shape_, 0.5)) ||
cov_fct_type_ == "exponential_tapered") {
double cm = transf_scale ? (-1. * pars[1]) : (marg_var * pars[1] * pars[1]);
sigma_grad = cm * sigma.cwiseProduct(dist);
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 1.5)) {
double cm = transf_scale ? 1. : (-1. * marg_var * pars[1]);
sigma_grad = cm * ((pars[0] * pars[1] * dist.array() * ((-pars[1] * dist.array()).exp())).matrix() - pars[1] * sigma.cwiseProduct(dist));
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 2.5)) {
double cm = transf_scale ? 1. : (-1. * marg_var * pars[1]);
sigma_grad = cm * ((pars[0] * (pars[1] * dist.array() + (2. / 3.) * pars[1] * pars[1] * dist.array().square()) *
((-pars[1] * dist.array()).exp())).matrix() - pars[1] * sigma.cwiseProduct(dist));
}
else if (cov_fct_type_ == "gaussian") {
double cm = transf_scale ? (-1. * pars[1]) : (2. * marg_var * std::pow(pars[1], 3. / 2.));
sigma_grad = cm * sigma.cwiseProduct(dist.array().square().matrix());
}
else if (cov_fct_type_ == "powered_exponential") {
double cm = transf_scale ? (-1. * pars[1]) : (shape_ * marg_var * std::pow(pars[1], (shape_ + 1.) / shape_));
sigma_grad = cm * sigma.cwiseProduct(dist.array().pow(shape_).matrix());
}
else {
Log::REFatal("GetCovMatGradRange: Covariance of type '%s' is not supported.", cov_fct_type_.c_str());
}
}
/*!
* \brief Calculates derivatives of the covariance matrix with respect to the inverse range parameter
* Note: this is the version for sparse matrixes
* \param dist Distance matrix
* \param sigma Covariance matrix
* \param pars Vector with covariance parameters
* \param[out] sigma_grad Derivative of covariance matrix with respect to the inverse range parameter
* \param transf_scale If true, the derivative is taken on the transformed scale otherwise with respect to the original range parameter (the parameters values pars are always given on the transformed scale). Optimiziation is done using transf_scale=true. transf_scale=false is needed, for instance, for calcualting the Fisher information on the original scale.
* \param marg_var Marginal variance parameters sigma^2 (used only if transf_scale = false to transform back)
*/
void GetCovMatGradRange(const sp_mat_t& dist,
const sp_mat_t& sigma,
const vec_t& pars,
sp_mat_t& sigma_grad,
bool transf_scale,
double marg_var) const {
CHECK(pars.size() == num_cov_par_);
if (cov_fct_type_ == "exponential" ||
(cov_fct_type_ == "matern" && AreSame(shape_, 0.5)) ||
cov_fct_type_ == "exponential_tapered") {
double cm = transf_scale ? (-1. * pars[1]) : (marg_var * pars[1] * pars[1]);
sigma_grad = cm * sigma.cwiseProduct(dist);
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 1.5)) {
double cm = transf_scale ? 1. : (-1. * marg_var * pars[1]);
sigma_grad = dist;
sigma_grad.coeffs() = pars[0] * pars[1] * sigma_grad.coeffs() * ((-pars[1] * sigma_grad.coeffs()).exp());
sigma_grad -= pars[1] * sigma.cwiseProduct(dist);
sigma_grad *= cm;
}
else if (cov_fct_type_ == "matern" && AreSame(shape_, 2.5)) {
double cm = transf_scale ? 1. : (-1. * marg_var * pars[1]);
sigma_grad = dist;
sigma_grad.coeffs() = pars[0] * (pars[1] * sigma_grad.coeffs() + (2. / 3.) * pars[1] * pars[1] * sigma_grad.coeffs().square()) *
((-pars[1] * sigma_grad.coeffs()).exp());
sigma_grad -= pars[1] * sigma.cwiseProduct(dist);
sigma_grad *= cm;
}
else if (cov_fct_type_ == "gaussian") {
double cm = transf_scale ? (-1. * pars[1]) : (2. * marg_var * std::pow(pars[1], 3. / 2.));
sigma_grad = dist;
sigma_grad.coeffs() = sigma_grad.coeffs().square();
sigma_grad = cm * sigma.cwiseProduct(sigma_grad);
}
else if (cov_fct_type_ == "powered_exponential") {
double cm = transf_scale ? (-1. * pars[1]) : (shape_ * marg_var * std::pow(pars[1], (shape_ + 1.) / shape_));
sigma_grad = dist;
sigma_grad.coeffs() = sigma_grad.coeffs().pow(shape_);
sigma_grad = cm * sigma.cwiseProduct(sigma_grad);
}
else {
Log::REFatal("GetCovMatGradRange: Covariance of type '%s' is not supported.", cov_fct_type_.c_str());
}
}
private:
/*! \brief Type of covariance function */
string_t cov_fct_type_;
/*! \brief Shape parameter of covariance function (=smoothness parameter for Matern covariance) */
double shape_;
/*! \brief Range parameter of Wendland covariance function / taper */
double taper_range_;
/*! \brief Parameter \mu of Wendland covariance function / taper. We follow the notation of Bevilacqua et al. (2018) */
double taper_mu_;
/*! \brief Number of covariance parameters*/
int num_cov_par_;
/*! \brief List of supported covariance functions */
const std::set<string_t> SUPPORTED_COV_TYPES_{ "exponential",
"gaussian",
"powered_exponential",
"matern",
"wendland",
"exponential_tapered" };
inline bool AreSame(const double a, const double b) const {
const double epsilon = 0.00000001;
bool are_same = false;
if (fabs(a) < epsilon) {
are_same = fabs(b) < epsilon;
}
else {
are_same = fabs(a - b) < a * epsilon;
}
return are_same;
}
/*!
* \brief Calculates Wendland correlation function (function used for dense matrices)
* \param dist Distance
* \return Wendland correlation
*/
inline double WendlandCorrelation(const double dist) const {
if (AreSame(shape_, 0.)) {
return(std::pow((1. - dist / taper_range_), taper_mu_));
}
else if (AreSame(shape_, 1.)) {
return(std::pow((1. - dist / taper_range_), taper_mu_ + 1.) * (1. + dist / taper_range_ * (taper_mu_ + 1.)));
}
else if (AreSame(shape_, 2.)) {
return(std::pow((1. - dist / taper_range_), taper_mu_ + 2.) *
(1. + dist / taper_range_ * (taper_mu_ + 2.) + std::pow(dist / taper_range_, 2) * (taper_mu_ * taper_mu_ + 4 * taper_mu_ + 3.) / 3.));
}
else {
return 0.;
}
}
/*!
* \brief Calculates Wendland correlation function (function used for sparse matrices)
* \param[out] sigma (Covariance) matrix to which the Wendland correlation is "added" (multiplied with)
* \param dist Distance matrix
*/
inline void AddWendlandCorrelation(sp_mat_t& sigma, const sp_mat_t& dist) const {
if (AreSame(shape_, 0.)) {
sigma.coeffs() *= (1. - dist.coeffs() / taper_range_).pow(taper_mu_);
}
else if (AreSame(shape_, 1.)) {
sigma.coeffs() *= (1. - dist.coeffs() / taper_range_).pow(taper_mu_ + 1.) *
(1. + dist.coeffs() / taper_range_ * (taper_mu_ + 1.));
}
else if (AreSame(shape_, 2.)) {
sigma.coeffs() *= (1. - dist.coeffs() / taper_range_).pow(taper_mu_ + 2.) *
(1. + dist.coeffs() / taper_range_ * (taper_mu_ + 2.) + (dist.coeffs() / taper_range_).pow(2) * (taper_mu_ * taper_mu_ + 4 * taper_mu_ + 3.) / 3.);
}
}
template<typename>
friend class RECompGP;
};
} // namespace GPBoost
#endif // GPB_COV_FUNCTIONS_
|
parser.c | /* C++ Parser.
Copyright (C) 2000, 2001, 2002, 2003, 2004,
2005, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
Written by Mark Mitchell <mark@codesourcery.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "timevar.h"
#include "cpplib.h"
#include "tree.h"
#include "cp-tree.h"
#include "intl.h"
#include "c-family/c-pragma.h"
#include "decl.h"
#include "flags.h"
#include "diagnostic-core.h"
#include "output.h"
#include "target.h"
#include "cgraph.h"
#include "c-family/c-common.h"
#include "c-family/c-objc.h"
#include "plugin.h"
#include "tree-pretty-print.h"
#include "parser.h"
/* The lexer. */
/* The cp_lexer_* routines mediate between the lexer proper (in libcpp
and c-lex.c) and the C++ parser. */
/* Shared sentinel token of type CPP_EOF; all other fields are zero/empty. */
static cp_token eof_token =
{
  CPP_EOF, RID_MAX, 0, PRAGMA_NONE, false, false, false, 0, { NULL }
};
/* The various kinds of non integral constant we encounter. */
/* The various kinds of non integral constant we encounter.  The comment on
   each enumerator gives the construct it represents (in diagnostic
   %<...%> quoting where applicable).  */
typedef enum non_integral_constant {
  NIC_NONE,
  /* floating-point literal */
  NIC_FLOAT,
  /* %<this%> */
  NIC_THIS,
  /* %<__FUNCTION__%> */
  NIC_FUNC_NAME,
  /* %<__PRETTY_FUNCTION__%> */
  NIC_PRETTY_FUNC,
  /* %<__func__%> */
  NIC_C99_FUNC,
  /* %<va_arg%> */
  NIC_VA_ARG,
  /* a cast */
  NIC_CAST,
  /* %<typeid%> operator */
  NIC_TYPEID,
  /* non-constant compound literals */
  NIC_NCC,
  /* a function call */
  NIC_FUNC_CALL,
  /* an increment */
  NIC_INC,
  /* a decrement */
  NIC_DEC,
  /* an array reference */
  NIC_ARRAY_REF,
  /* %<->%> */
  NIC_ARROW,
  /* %<.%> */
  NIC_POINT,
  /* the address of a label */
  NIC_ADDR_LABEL,
  /* %<*%> */
  NIC_STAR,
  /* %<&%> */
  NIC_ADDR,
  /* %<++%> */
  NIC_PREINCREMENT,
  /* %<--%> */
  NIC_PREDECREMENT,
  /* %<new%> */
  NIC_NEW,
  /* %<delete%> */
  NIC_DEL,
  /* calls to overloaded operators */
  NIC_OVERLOADED,
  /* an assignment */
  NIC_ASSIGNMENT,
  /* a comma operator */
  NIC_COMMA,
  /* a call to a constructor */
  NIC_CONSTRUCTOR,
  /* a transaction expression */
  NIC_TRANSACTION
} non_integral_constant;
/* The various kinds of errors about name-lookup failing. */
/* The various kinds of errors about name-lookup failing.  The comment on
   each enumerator appears to be the diagnostic fragment it corresponds
   to.  */
typedef enum name_lookup_error {
  /* NULL */
  NLE_NULL,
  /* is not a type */
  NLE_TYPE,
  /* is not a class or namespace */
  NLE_CXX98,
  /* is not a class, namespace, or enumeration */
  NLE_NOT_CXX98
} name_lookup_error;
/* The various kinds of required token */
/* The various kinds of required token */
typedef enum required_token {
  RT_NONE,
  RT_SEMICOLON,  /* ';' */
  RT_OPEN_PAREN, /* '(' */
  RT_CLOSE_BRACE, /* '}' */
  RT_OPEN_BRACE, /* '{' */
  RT_CLOSE_SQUARE, /* ']' */
  RT_OPEN_SQUARE, /* '[' */
  RT_COMMA, /* ',' */
  RT_SCOPE, /* '::' */
  RT_LESS, /* '<' */
  RT_GREATER, /* '>' */
  RT_EQ, /* '=' */
  RT_ELLIPSIS, /* '...' */
  RT_MULT, /* '*' */
  RT_COMPL, /* '~' */
  RT_COLON, /* ':' */
  RT_COLON_SCOPE, /* ':' or '::' */
  RT_CLOSE_PAREN, /* ')' */
  RT_COMMA_CLOSE_PAREN, /* ',' or ')' */
  RT_PRAGMA_EOL, /* end of line */
  RT_NAME, /* identifier */

  /* The type is CPP_KEYWORD */
  RT_NEW, /* new */
  RT_DELETE, /* delete */
  RT_RETURN, /* return */
  RT_WHILE, /* while */
  RT_EXTERN, /* extern */
  RT_STATIC_ASSERT, /* static_assert */
  RT_DECLTYPE, /* decltype */
  RT_OPERATOR, /* operator */
  RT_CLASS, /* class */
  RT_TEMPLATE, /* template */
  RT_NAMESPACE, /* namespace */
  RT_USING, /* using */
  RT_ASM, /* asm */
  RT_TRY, /* try */
  RT_CATCH, /* catch */
  RT_THROW, /* throw */
  RT_LABEL, /* __label__ */
  RT_AT_TRY, /* @try */
  RT_AT_SYNCHRONIZED, /* @synchronized */
  RT_AT_THROW, /* @throw */

  RT_SELECT,  /* selection-statement */
  RT_INTERATION, /* iteration-statement (sic: misspelling of "ITERATION";
                    renaming would break existing references, so it is
                    documented rather than fixed) */
  RT_JUMP, /* jump-statement */
  RT_CLASS_KEY, /* class-key */
  RT_CLASS_TYPENAME_TEMPLATE, /* class, typename, or template */
  RT_TRANSACTION_ATOMIC, /* __transaction_atomic */
  RT_TRANSACTION_RELAXED, /* __transaction_relaxed */
  RT_TRANSACTION_CANCEL /* __transaction_cancel */
} required_token;
/* Prototypes.  */

/* Lexer construction and destruction.  */
static cp_lexer *cp_lexer_new_main (void);
static cp_lexer *cp_lexer_new_from_tokens (cp_token_cache *tokens);
static void cp_lexer_destroy (cp_lexer *);

/* Token access, lookahead, and consumption.  */
static int cp_lexer_saving_tokens (const cp_lexer *);
static cp_token *cp_lexer_token_at (cp_lexer *, cp_token_position);
static void cp_lexer_get_preprocessor_token (cp_lexer *, cp_token *);
static inline cp_token *cp_lexer_peek_token (cp_lexer *);
static cp_token *cp_lexer_peek_nth_token (cp_lexer *, size_t);
static inline bool cp_lexer_next_token_is (cp_lexer *, enum cpp_ttype);
static bool cp_lexer_next_token_is_not (cp_lexer *, enum cpp_ttype);
static bool cp_lexer_next_token_is_keyword (cp_lexer *, enum rid);
static cp_token *cp_lexer_consume_token (cp_lexer *);
static void cp_lexer_purge_token (cp_lexer *);
static void cp_lexer_purge_tokens_after (cp_lexer *, cp_token_position);

/* Tentative-parsing support: checkpoint, commit, or rewind the
   token stream.  */
static void cp_lexer_save_tokens (cp_lexer *);
static void cp_lexer_commit_tokens (cp_lexer *);
static void cp_lexer_rollback_tokens (cp_lexer *);

/* Debugging support.  */
static void cp_lexer_print_token (FILE *, cp_token *);
static inline bool cp_lexer_debugging_p (cp_lexer *);
static void cp_lexer_start_debugging (cp_lexer *) ATTRIBUTE_UNUSED;
static void cp_lexer_stop_debugging (cp_lexer *) ATTRIBUTE_UNUSED;

/* Token caches and miscellaneous helpers.  */
static cp_token_cache *cp_token_cache_new (cp_token *, cp_token *);
static void cp_parser_initial_pragma (cp_token *);
static tree cp_literal_operator_id (const char *);
/* Manifest constants.  */

/* Initial token-buffer capacity: as many cp_tokens as fit in 256KB.  */
#define CP_LEXER_BUFFER_SIZE ((256 * 1024) / sizeof (cp_token))
/* Initial depth of the saved-token-position stack used for
   tentative parsing.  */
#define CP_SAVED_TOKEN_STACK 5

/* Variables.  */

/* The stream to which debugging output should be written.  */
static FILE *cp_lexer_debug_stream;

/* Nonzero if we are parsing an unevaluated operand: an operand to
   sizeof, typeof, or alignof.  */
int cp_unevaluated_operand;
/* Dump up to NUM tokens in BUFFER to FILE starting with token
   START_TOKEN.  If START_TOKEN is NULL, the dump starts with the
   first token in BUFFER.  If NUM is 0, dump all the tokens.  If
   CURR_TOKEN is set and it is one of the tokens in BUFFER, it will be
   highlighted by surrounding it in [[ ]].  */
static void
cp_lexer_dump_tokens (FILE *file, VEC(cp_token,gc) *buffer,
                      cp_token *start_token, unsigned num,
                      cp_token *curr_token)
{
  unsigned i, nprinted;
  cp_token *token;
  bool do_print;

  fprintf (file, "%u tokens\n", VEC_length (cp_token, buffer));

  if (buffer == NULL)
    return;

  if (num == 0)
    num = VEC_length (cp_token, buffer);

  if (start_token == NULL)
    start_token = VEC_address (cp_token, buffer);

  /* If the dump does not begin at the first token, print that first
     token followed by an ellipsis to show that tokens were skipped.  */
  if (start_token > VEC_address (cp_token, buffer))
    {
      cp_lexer_print_token (file, VEC_index (cp_token, buffer, 0));
      fprintf (file, " ... ");
    }

  do_print = false;
  nprinted = 0;
  for (i = 0; VEC_iterate (cp_token, buffer, i, token) && nprinted < num; i++)
    {
      /* Skip tokens until START_TOKEN is reached.  */
      if (token == start_token)
        do_print = true;

      if (!do_print)
        continue;

      nprinted++;
      if (token == curr_token)
        fprintf (file, "[[");

      cp_lexer_print_token (file, token);

      if (token == curr_token)
        fprintf (file, "]]");

      /* Break the line after tokens that usually end a statement or
         scope, to keep the dump readable.  */
      switch (token->type)
        {
        case CPP_SEMICOLON:
        case CPP_OPEN_BRACE:
        case CPP_CLOSE_BRACE:
        case CPP_EOF:
          fputc ('\n', file);
          break;

        default:
          fputc (' ', file);
        }
    }

  /* If the dump was truncated, show an ellipsis and the final token.  */
  if (i == num && i < VEC_length (cp_token, buffer))
    {
      fprintf (file, " ... ");
      cp_lexer_print_token (file, VEC_index (cp_token, buffer,
                            VEC_length (cp_token, buffer) - 1));
    }
  fprintf (file, "\n");
}
/* Convenience wrapper: dump the entire token buffer BUFFER to stderr,
   starting at the first token and highlighting nothing.  */
void
cp_lexer_debug_tokens (VEC(cp_token,gc) *buffer)
{
  cp_lexer_dump_tokens (stderr, buffer, /*start_token=*/NULL,
                        /*num=*/0, /*curr_token=*/NULL);
}
/* Dump the cp_parser tree field T to FILE if T is non-NULL.  DESC is
   the description printed before T.  Nothing is printed for a NULL T.  */
static void
cp_debug_print_tree_if_set (FILE *file, const char *desc, tree t)
{
  if (!t)
    return;

  fprintf (file, "%s: ", desc);
  print_node_brief (file, "", t, 0);
}
/* Dump parser context C to FILE.  */
static void
cp_debug_print_context (FILE *file, cp_parser_context *c)
{
  /* Indexed by C->status.  */
  const char *status_s[] = { "OK", "ERROR", "COMMITTED" };
  /* NOTE(review): the label reads "scope" but the field printed is
     c->object_type -- confirm whether the label or the field is the
     intended one.  */
  fprintf (file, "{ status = %s, scope = ", status_s[c->status]);
  print_node_brief (file, "", c->object_type, 0);
  fprintf (file, "}\n");
}
/* Print the stack of parsing contexts to FILE starting with FIRST.
   Contexts are numbered from 0 in walk order.  */
static void
cp_debug_print_context_stack (FILE *file, cp_parser_context *first)
{
  unsigned depth;
  cp_parser_context *ctx;

  fprintf (file, "Parsing context stack:\n");
  for (depth = 0, ctx = first; ctx != NULL; ctx = ctx->next, depth++)
    {
      fprintf (file, "\t#%u: ", depth);
      cp_debug_print_context (file, ctx);
    }
}
/* Print the value of FLAG to FILE.  DESC is a string describing the
   flag.  Unset flags produce no output at all.  */
static void
cp_debug_print_flag (FILE *file, const char *desc, bool flag)
{
  if (!flag)
    return;

  fprintf (file, "%s: true\n", desc);
}
/* Print an unparsed function entry UF to FILE.  Each entry carries
   three queues of deferred work: functions with default arguments,
   function definitions needing post-processing, and non-static data
   member initializers needing post-processing.  */
static void
cp_debug_print_unparsed_function (FILE *file, cp_unparsed_functions_entry *uf)
{
  unsigned i;
  cp_default_arg_entry *default_arg_fn;
  tree fn;

  fprintf (file, "\tFunctions with default args:\n");
  for (i = 0;
       VEC_iterate (cp_default_arg_entry, uf->funs_with_default_args, i,
                    default_arg_fn);
       i++)
    {
      fprintf (file, "\t\tClass type: ");
      print_node_brief (file, "", default_arg_fn->class_type, 0);
      fprintf (file, "\t\tDeclaration: ");
      print_node_brief (file, "", default_arg_fn->decl, 0);
      fprintf (file, "\n");
    }

  fprintf (file, "\n\tFunctions with definitions that require "
           "post-processing\n\t\t");
  for (i = 0; VEC_iterate (tree, uf->funs_with_definitions, i, fn); i++)
    {
      print_node_brief (file, "", fn, 0);
      fprintf (file, " ");
    }
  fprintf (file, "\n");

  fprintf (file, "\n\tNon-static data members with initializers that require "
           "post-processing\n\t\t");
  for (i = 0; VEC_iterate (tree, uf->nsdmis, i, fn); i++)
    {
      print_node_brief (file, "", fn, 0);
      fprintf (file, " ");
    }
  fprintf (file, "\n");
}
/* Print the stack of unparsed member functions S to FILE, one
   numbered entry per stack element.  */
static void
cp_debug_print_unparsed_queues (FILE *file,
                                VEC(cp_unparsed_functions_entry, gc) *s)
{
  unsigned ix;
  cp_unparsed_functions_entry *entry;

  fprintf (file, "Unparsed functions\n");
  for (ix = 0; VEC_iterate (cp_unparsed_functions_entry, s, ix, entry); ix++)
    {
      fprintf (file, "#%u:\n", ix);
      cp_debug_print_unparsed_function (file, entry);
    }
}
/* Dump the tokens in a window of size WINDOW_SIZE around the
   next_token for the given PARSER.  If FILE is NULL, the output is
   printed on stderr.  The window is centered on next_token when
   possible, otherwise it starts at the beginning of the buffer.  */
static void
cp_debug_parser_tokens (FILE *file, cp_parser *parser, int window_size)
{
  cp_token *next_token, *first_token, *start_token;

  if (file == NULL)
    file = stderr;

  next_token = parser->lexer->next_token;
  first_token = VEC_address (cp_token, parser->lexer->buffer);
  if (next_token > first_token + window_size / 2)
    start_token = next_token - window_size / 2;
  else
    start_token = first_token;
  cp_lexer_dump_tokens (file, parser->lexer->buffer, start_token, window_size,
                        next_token);
}
/* Dump debugging information for the given PARSER.  If FILE is NULL,
   the output is printed on stderr.  Prints the token count, scopes,
   the context stack, every set parser flag, the unparsed-function
   queues, a window of tokens around the current position, and the
   next token with its source location.  */
void
cp_debug_parser (FILE *file, cp_parser *parser)
{
  const size_t window_size = 20;
  cp_token *token;
  expanded_location eloc;

  if (file == NULL)
    file = stderr;

  fprintf (file, "Parser state\n\n");
  fprintf (file, "Number of tokens: %u\n",
           VEC_length (cp_token, parser->lexer->buffer));
  cp_debug_print_tree_if_set (file, "Lookup scope", parser->scope);
  cp_debug_print_tree_if_set (file, "Object scope",
                              parser->object_scope);
  cp_debug_print_tree_if_set (file, "Qualifying scope",
                              parser->qualifying_scope);
  cp_debug_print_context_stack (file, parser->context);

  /* Boolean parser state; only flags that are set are printed.  */
  cp_debug_print_flag (file, "Allow GNU extensions",
                       parser->allow_gnu_extensions_p);
  cp_debug_print_flag (file, "'>' token is greater-than",
                       parser->greater_than_is_operator_p);
  cp_debug_print_flag (file, "Default args allowed in current "
                       "parameter list", parser->default_arg_ok_p);
  cp_debug_print_flag (file, "Parsing integral constant-expression",
                       parser->integral_constant_expression_p);
  cp_debug_print_flag (file, "Allow non-constant expression in current "
                       "constant-expression",
                       parser->allow_non_integral_constant_expression_p);
  cp_debug_print_flag (file, "Seen non-constant expression",
                       parser->non_integral_constant_expression_p);
  cp_debug_print_flag (file, "Local names and 'this' forbidden in "
                       "current context",
                       parser->local_variables_forbidden_p);
  cp_debug_print_flag (file, "In unbraced linkage specification",
                       parser->in_unbraced_linkage_specification_p);
  cp_debug_print_flag (file, "Parsing a declarator",
                       parser->in_declarator_p);
  cp_debug_print_flag (file, "In template argument list",
                       parser->in_template_argument_list_p);
  cp_debug_print_flag (file, "Parsing an iteration statement",
                       parser->in_statement & IN_ITERATION_STMT);
  cp_debug_print_flag (file, "Parsing a switch statement",
                       parser->in_statement & IN_SWITCH_STMT);
  cp_debug_print_flag (file, "Parsing a structured OpenMP block",
                       parser->in_statement & IN_OMP_BLOCK);
  /* Fixed typo: this message formerly read "Parsing a an OpenMP loop".  */
  cp_debug_print_flag (file, "Parsing an OpenMP loop",
                       parser->in_statement & IN_OMP_FOR);
  cp_debug_print_flag (file, "Parsing an if statement",
                       parser->in_statement & IN_IF_STMT);
  cp_debug_print_flag (file, "Parsing a type-id in an expression "
                       "context", parser->in_type_id_in_expr_p);
  cp_debug_print_flag (file, "Declarations are implicitly extern \"C\"",
                       parser->implicit_extern_c);
  cp_debug_print_flag (file, "String expressions should be translated "
                       "to execution character set",
                       parser->translate_strings_p);
  cp_debug_print_flag (file, "Parsing function body outside of a "
                       "local class", parser->in_function_body);
  cp_debug_print_flag (file, "Auto correct a colon to a scope operator",
                       parser->colon_corrects_to_scope_p);

  if (parser->type_definition_forbidden_message)
    fprintf (file, "Error message for forbidden type definitions: %s\n",
             parser->type_definition_forbidden_message);

  cp_debug_print_unparsed_queues (file, parser->unparsed_queues);
  fprintf (file, "Number of class definitions in progress: %u\n",
           parser->num_classes_being_defined);
  fprintf (file, "Number of template parameter lists for the current "
           "declaration: %u\n", parser->num_template_parameter_lists);

  /* Show a window of tokens around the parser's current position.  */
  cp_debug_parser_tokens (file, parser, window_size);

  token = parser->lexer->next_token;
  fprintf (file, "Next token to parse:\n");
  fprintf (file, "\tToken: ");
  cp_lexer_print_token (file, token);
  eloc = expand_location (token->location);
  fprintf (file, "\n\tFile: %s\n", eloc.file);
  fprintf (file, "\tLine: %d\n", eloc.line);
  fprintf (file, "\tColumn: %d\n", eloc.column);
}
/* Allocate memory for a new lexer object and return it.  The lexer is
   GC-allocated and zero-initialized; debugging is off and both the
   saved-token stack and the token buffer start at their default
   capacities.  */
static cp_lexer *
cp_lexer_alloc (void)
{
  cp_lexer *lexer;

  /* Presumably tells the front end that no further PCH loads are
     possible (see c_common_no_more_pch) -- done before any token
     memory is allocated.  */
  c_common_no_more_pch ();

  /* Allocate the memory.  */
  lexer = ggc_alloc_cleared_cp_lexer ();

  /* Initially we are not debugging.  */
  lexer->debugging_p = false;

  /* Heap-allocated stack of positions for tentative parsing.  */
  lexer->saved_tokens = VEC_alloc (cp_token_position, heap,
                                   CP_SAVED_TOKEN_STACK);

  /* Create the buffer.  */
  lexer->buffer = VEC_alloc (cp_token, gc, CP_LEXER_BUFFER_SIZE);

  return lexer;
}
/* Create a new main C++ lexer, the lexer that gets tokens from the
   preprocessor.  Reads the entire token stream eagerly, through EOF,
   into the lexer's buffer.  */
static cp_lexer *
cp_lexer_new_main (void)
{
  cp_lexer *lexer;
  cp_token token;

  /* It's possible that parsing the first pragma will load a PCH file,
     which is a GC collection point.  So we have to do that before
     allocating any memory.  */
  cp_parser_initial_pragma (&token);

  lexer = cp_lexer_alloc ();

  /* Put the first token in the buffer.  */
  VEC_quick_push (cp_token, lexer->buffer, &token);

  /* Get the remaining tokens from the preprocessor.  */
  while (token.type != CPP_EOF)
    {
      cp_lexer_get_preprocessor_token (lexer, &token);
      VEC_safe_push (cp_token, gc, lexer->buffer, &token);
    }

  /* last_token points at the final (EOF) token in the buffer.  */
  lexer->last_token = VEC_address (cp_token, lexer->buffer)
                      + VEC_length (cp_token, lexer->buffer)
                      - 1;
  /* Begin reading at the first token; fall back to the shared
     eof_token if the buffer is somehow empty.  */
  lexer->next_token = VEC_length (cp_token, lexer->buffer)
                      ? VEC_address (cp_token, lexer->buffer)
                      : &eof_token;

  /* Subsequent preprocessor diagnostics should use compiler
     diagnostic functions to get the compiler source location.  */
  done_lexing = true;

  gcc_assert (!lexer->next_token->purged_p);
  return lexer;
}
/* Create a new lexer whose token stream is primed with the tokens in
   CACHE.  When these tokens are exhausted, no new tokens will be
   read.  The token range stays owned by CACHE.  */
static cp_lexer *
cp_lexer_new_from_tokens (cp_token_cache *cache)
{
  cp_lexer *lexer = ggc_alloc_cleared_cp_lexer ();

  /* We do not own the buffer; the cached range belongs to CACHE.  */
  lexer->buffer = NULL;
  lexer->last_token = cache->last;
  /* An empty range degenerates straight to EOF.  */
  lexer->next_token = (cache->first == cache->last
                       ? &eof_token
                       : cache->first);
  lexer->saved_tokens = VEC_alloc (cp_token_position, heap,
                                   CP_SAVED_TOKEN_STACK);
  /* Debugging is off until explicitly enabled.  */
  lexer->debugging_p = false;

  gcc_assert (!lexer->next_token->purged_p);
  return lexer;
}
/* Frees all resources associated with LEXER: the saved-token stack,
   the token buffer (if owned), and the lexer object itself.  */
static void
cp_lexer_destroy (cp_lexer *lexer)
{
  VEC_free (cp_token_position, heap, lexer->saved_tokens);
  VEC_free (cp_token, gc, lexer->buffer);
  ggc_free (lexer);
}
/* Nonzero iff LEXER currently has debug tracing enabled.  */
static inline bool
cp_lexer_debugging_p (cp_lexer *lexer)
{
  return lexer->debugging_p;
}
/* Return the position of the current (or, if PREVIOUS_P, the
   previous) token in LEXER's stream.  The previous position is only
   meaningful when the lookahead is not at EOF.  */
static inline cp_token_position
cp_lexer_token_position (cp_lexer *lexer, bool previous_p)
{
  gcc_assert (!previous_p || lexer->next_token != &eof_token);

  if (previous_p)
    return lexer->next_token - 1;
  return lexer->next_token;
}
/* Return the token stored at position POS.  A token position is
   simply a pointer into the token buffer, so no lookup is needed.  */
static inline cp_token *
cp_lexer_token_at (cp_lexer *lexer ATTRIBUTE_UNUSED, cp_token_position pos)
{
  return pos;
}
/* Reposition LEXER so that the next token returned is the one at
   POS.  */
static inline void
cp_lexer_set_token_position (cp_lexer *lexer, cp_token_position pos)
{
  lexer->next_token = cp_lexer_token_at (lexer, pos);
}
/* Return the position of the token just before the current one.
   When the lookahead is already at EOF, that is the token preceding
   the end of the buffer.  */
static inline cp_token_position
cp_lexer_previous_token_position (cp_lexer *lexer)
{
  if (lexer->next_token != &eof_token)
    return cp_lexer_token_position (lexer, true);

  return lexer->last_token - 1;
}
/* Return the token immediately before the current one in LEXER's
   stream.  */
static inline cp_token *
cp_lexer_previous_token (cp_lexer *lexer)
{
  return cp_lexer_token_at (lexer,
                            cp_lexer_previous_token_position (lexer));
}
/* Nonzero if we are presently saving tokens, i.e. at least one
   position has been pushed by cp_lexer_save_tokens and not yet
   committed or rolled back.  */
static inline int
cp_lexer_saving_tokens (const cp_lexer* lexer)
{
  return VEC_length (cp_token_position, lexer->saved_tokens) > 0;
}
/* Store the next token from the preprocessor in *TOKEN.  If LEXER is
   NULL, assume we are handling an initial #pragma pch_preprocess, and
   thus want the lexer to return processed strings.

   (An earlier version of this comment claimed "Return true if we
   reach EOF", but the function returns void; callers detect EOF by
   inspecting TOKEN->type.)  */
static void
cp_lexer_get_preprocessor_token (cp_lexer *lexer, cp_token *token)
{
  /* Tracks the nesting depth of implicit extern "C" regions across
     successive calls.  */
  static int is_extern_c = 0;

  /* Get a new token from the preprocessor.  */
  token->type
    = c_lex_with_flags (&token->u.value, &token->location, &token->flags,
                        lexer == NULL ? 0 : C_LEX_STRING_NO_JOIN);
  token->keyword = RID_MAX;
  token->pragma_kind = PRAGMA_NONE;
  token->purged_p = false;

  /* On some systems, some header files are surrounded by an
     implicit extern "C" block.  Set a flag in the token if it
     comes from such a header.  */
  is_extern_c += pending_lang_change;
  pending_lang_change = 0;
  token->implicit_extern_c = is_extern_c > 0;

  /* Check to see if this token is a keyword.  */
  if (token->type == CPP_NAME)
    {
      if (C_IS_RESERVED_WORD (token->u.value))
        {
          /* Mark this token as a keyword.  */
          token->type = CPP_KEYWORD;
          /* Record which keyword.  */
          token->keyword = C_RID_CODE (token->u.value);
        }
      else
        {
          if (warn_cxx0x_compat
              && C_RID_CODE (token->u.value) >= RID_FIRST_CXX0X
              && C_RID_CODE (token->u.value) <= RID_LAST_CXX0X)
            {
              /* Warn about the C++0x keyword (but still treat it as
                 an identifier).  */
              warning (OPT_Wc__0x_compat,
                       "identifier %qE is a keyword in C++11",
                       token->u.value);

              /* Clear out the C_RID_CODE so we don't warn about this
                 particular identifier-turned-keyword again.  */
              C_SET_RID_CODE (token->u.value, RID_MAX);
            }

          token->ambiguous_p = false;
          token->keyword = RID_MAX;
        }
    }
  else if (token->type == CPP_AT_NAME)
    {
      /* This only happens in Objective-C++; it must be a keyword.  */
      token->type = CPP_KEYWORD;
      switch (C_RID_CODE (token->u.value))
        {
          /* Replace 'class' with '@class', 'private' with '@private',
             etc.  This prevents confusion with the C++ keyword
             'class', and makes the tokens consistent with other
             Objective-C 'AT' keywords.  For example '@class' is
             reported as RID_AT_CLASS which is consistent with
             '@synchronized', which is reported as
             RID_AT_SYNCHRONIZED.
          */
        case RID_CLASS:     token->keyword = RID_AT_CLASS; break;
        case RID_PRIVATE:   token->keyword = RID_AT_PRIVATE; break;
        case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break;
        case RID_PUBLIC:    token->keyword = RID_AT_PUBLIC; break;
        case RID_THROW:     token->keyword = RID_AT_THROW; break;
        case RID_TRY:       token->keyword = RID_AT_TRY; break;
        case RID_CATCH:     token->keyword = RID_AT_CATCH; break;
        default:            token->keyword = C_RID_CODE (token->u.value);
        }
    }
  else if (token->type == CPP_PRAGMA)
    {
      /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST.  */
      token->pragma_kind = ((enum pragma_kind)
                            TREE_INT_CST_LOW (token->u.value));
      token->u.value = NULL_TREE;
    }
}
/* Update the global input_location from TOKEN.  The EOF token carries
   no useful location, so it is ignored.  */
static inline void
cp_lexer_set_source_position_from_token (cp_token *token)
{
  if (token->type == CPP_EOF)
    return;

  input_location = token->location;
}
/* Return a pointer to the next token in the token stream, but do not
   consume it.  Emits a trace line when debugging is enabled.  */
static inline cp_token *
cp_lexer_peek_token (cp_lexer *lexer)
{
  cp_token *token = lexer->next_token;

  if (cp_lexer_debugging_p (lexer))
    {
      fputs ("cp_lexer: peeking at token: ", cp_lexer_debug_stream);
      cp_lexer_print_token (cp_lexer_debug_stream, token);
      putc ('\n', cp_lexer_debug_stream);
    }
  return token;
}
/* Return true if the next token has the indicated TYPE.  */
static inline bool
cp_lexer_next_token_is (cp_lexer* lexer, enum cpp_ttype type)
{
  cp_token *token = cp_lexer_peek_token (lexer);
  return token->type == type;
}
/* Return true if the next token does not have the indicated TYPE.  */
static bool
cp_lexer_next_token_is_not (cp_lexer* lexer, enum cpp_ttype type)
{
  return cp_lexer_peek_token (lexer)->type != type;
}
/* Return true if the next token is the indicated KEYWORD.  */
static inline bool
cp_lexer_next_token_is_keyword (cp_lexer* lexer, enum rid keyword)
{
  cp_token *token = cp_lexer_peek_token (lexer);
  return token->keyword == keyword;
}
/* Return true if the next token is not the indicated KEYWORD.  */
static inline bool
cp_lexer_next_token_is_not_keyword (cp_lexer* lexer, enum rid keyword)
{
  cp_token *token = cp_lexer_peek_token (lexer);
  return token->keyword != keyword;
}
/* Return true if the next token is a keyword for a decl-specifier:
   a storage class, an elaborated/simple type specifier, or one of the
   GNU / C++0x extensions listed below.  */
static bool
cp_lexer_next_token_is_decl_specifier_keyword (cp_lexer *lexer)
{
  cp_token *token;

  token = cp_lexer_peek_token (lexer);
  switch (token->keyword)
    {
      /* auto specifier: storage-class-specifier in C++,
         simple-type-specifier in C++0x.  */
    case RID_AUTO:
      /* Storage classes.  */
    case RID_REGISTER:
    case RID_STATIC:
    case RID_EXTERN:
    case RID_MUTABLE:
    case RID_THREAD:
      /* Elaborated type specifiers.  */
    case RID_ENUM:
    case RID_CLASS:
    case RID_STRUCT:
    case RID_UNION:
    case RID_TYPENAME:
      /* Simple type specifiers.  */
    case RID_CHAR:
    case RID_CHAR16:
    case RID_CHAR32:
    case RID_WCHAR:
    case RID_BOOL:
    case RID_SHORT:
    case RID_INT:
    case RID_LONG:
    case RID_INT128:
    case RID_SIGNED:
    case RID_UNSIGNED:
    case RID_FLOAT:
    case RID_DOUBLE:
    case RID_VOID:
      /* GNU extensions.  */
    case RID_ATTRIBUTE:
    case RID_TYPEOF:
      /* C++0x extensions.  */
    case RID_DECLTYPE:
    case RID_UNDERLYING_TYPE:
      return true;

    default:
      /* Note: non-keyword tokens have keyword == RID_MAX and fall
         through here.  */
      return false;
    }
}
/* Returns TRUE iff the token T begins a decltype type: either the
   decltype keyword itself or an already-folded CPP_DECLTYPE token.  */
static bool
token_is_decltype (cp_token *t)
{
  if (t->keyword == RID_DECLTYPE)
    return true;
  return t->type == CPP_DECLTYPE;
}
/* Returns TRUE iff the next token begins a decltype type.  */
static bool
cp_lexer_next_token_is_decltype (cp_lexer *lexer)
{
  return token_is_decltype (cp_lexer_peek_token (lexer));
}
/* Return a pointer to the Nth token in the token stream.  If N is 1,
   then this is precisely equivalent to cp_lexer_peek_token (except
   that it is not inline).  One would like to disallow that case, but
   there is one case (cp_parser_nth_token_starts_template_id) where
   the caller passes a variable for N and it might be 1.  */
static cp_token *
cp_lexer_peek_nth_token (cp_lexer* lexer, size_t n)
{
  cp_token *token;

  /* N is 1-based, not zero-based.  */
  gcc_assert (n > 0);

  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream,
             "cp_lexer: peeking ahead %ld at token: ", (long)n);

  --n;
  token = lexer->next_token;
  /* next_token is only &eof_token when the stream is exhausted, and
     then only the current (N == 1) token may be requested.  */
  gcc_assert (!n || token != &eof_token);
  while (n != 0)
    {
      ++token;
      if (token == lexer->last_token)
        {
          /* Ran off the end of the buffer: every further peek is EOF.  */
          token = &eof_token;
          break;
        }

      /* Purged tokens are invisible and do not count towards N.  */
      if (!token->purged_p)
        --n;
    }

  if (cp_lexer_debugging_p (lexer))
    {
      cp_lexer_print_token (cp_lexer_debug_stream, token);
      putc ('\n', cp_lexer_debug_stream);
    }

  return token;
}
/* Return the next token, and advance the lexer's next_token pointer
   to point to the next non-purged token.  */
static cp_token *
cp_lexer_consume_token (cp_lexer* lexer)
{
  cp_token *token = lexer->next_token;

  gcc_assert (token != &eof_token);
  /* Inside a pragma, the CPP_PRAGMA_EOL token must not be consumed
     through this path.  */
  gcc_assert (!lexer->in_pragma || token->type != CPP_PRAGMA_EOL);

  do
    {
      lexer->next_token++;
      if (lexer->next_token == lexer->last_token)
        {
          /* Walked off the end of the buffer: park on the EOF token.  */
          lexer->next_token = &eof_token;
          break;
        }
    }
  while (lexer->next_token->purged_p);

  /* Keep the global source location in sync with the consumed token.  */
  cp_lexer_set_source_position_from_token (token);

  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    {
      fputs ("cp_lexer: consuming token: ", cp_lexer_debug_stream);
      cp_lexer_print_token (cp_lexer_debug_stream, token);
      putc ('\n', cp_lexer_debug_stream);
    }

  return token;
}
/* Permanently remove the next token from the token stream, and
   advance the next_token pointer to refer to the next non-purged
   token.  */
static void
cp_lexer_purge_token (cp_lexer *lexer)
{
  cp_token *tok = lexer->next_token;

  gcc_assert (tok != &eof_token);
  /* Mark the token purged and scrub its payload so stale data cannot
     be mistaken for a live token later.  */
  tok->purged_p = true;
  tok->location = UNKNOWN_LOCATION;
  tok->u.value = NULL_TREE;
  tok->keyword = RID_MAX;

  /* Advance past any run of already-purged tokens.  */
  do
    {
      tok++;
      if (tok == lexer->last_token)
        {
          tok = &eof_token;
          break;
        }
    }
  while (tok->purged_p);

  lexer->next_token = tok;
}
/* Permanently remove all tokens after TOK, up to, but not
   including, the token that will be returned next by
   cp_lexer_peek_token.  */
static void
cp_lexer_purge_tokens_after (cp_lexer *lexer, cp_token *tok)
{
  cp_token *peek = lexer->next_token;

  /* When the lookahead is parked at EOF, purge to the buffer's end.  */
  if (peek == &eof_token)
    peek = lexer->last_token;

  gcc_assert (tok < peek);

  for ( tok += 1; tok != peek; tok += 1)
    {
      /* Mark purged and scrub the payload, exactly as
         cp_lexer_purge_token does for a single token.  */
      tok->purged_p = true;
      tok->location = UNKNOWN_LOCATION;
      tok->u.value = NULL_TREE;
      tok->keyword = RID_MAX;
    }
}
/* Begin saving tokens.  All tokens consumed after this point will be
   preserved, so the stream can later be rewound with
   cp_lexer_rollback_tokens.  */
static void
cp_lexer_save_tokens (cp_lexer* lexer)
{
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: saving tokens\n");

  /* Checkpoint the current position for a possible rollback.  */
  VEC_safe_push (cp_token_position, heap, lexer->saved_tokens,
                 lexer->next_token);
}
/* Commit to the portion of the token stream most recently saved:
   discard the checkpoint without moving the current position.  */
static void
cp_lexer_commit_tokens (cp_lexer* lexer)
{
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: committing tokens\n");

  VEC_pop (cp_token_position, lexer->saved_tokens);
}
/* Return all tokens saved since the last call to cp_lexer_save_tokens
   to the token stream: pop the checkpoint and rewind the current
   position to it.  Stop saving tokens.  */
static void
cp_lexer_rollback_tokens (cp_lexer* lexer)
{
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: restoring tokens\n");

  lexer->next_token = VEC_pop (cp_token_position, lexer->saved_tokens);
}
/* Print a representation of the TOKEN on the STREAM.  */
static void
cp_lexer_print_token (FILE * stream, cp_token *token)
{
  /* We don't use cpp_type2name here because the parser defines
     a few tokens of its own.  The table is built by expanding
     cpplib's TTYPE_TABLE with stringizing OP/TK macros, then
     appending the parser-specific token names.  */
  static const char *const token_names[] = {
    /* cpplib-defined token types */
#define OP(e, s) #e,
#define TK(e, s) #e,
    TTYPE_TABLE
#undef OP
#undef TK
    /* C++ parser token types - see "Manifest constants", above.  */
    "KEYWORD",
    "TEMPLATE_ID",
    "NESTED_NAME_SPECIFIER",
  };

  /* For some tokens, print the associated data.  */
  switch (token->type)
    {
    case CPP_KEYWORD:
      /* Some keywords have a value that is not an IDENTIFIER_NODE.
         For example, `struct' is mapped to an INTEGER_CST.  */
      if (TREE_CODE (token->u.value) != IDENTIFIER_NODE)
        break;
      /* else fall through */
    case CPP_NAME:
      fputs (IDENTIFIER_POINTER (token->u.value), stream);
      break;

    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      fprintf (stream, " \"%s\"", TREE_STRING_POINTER (token->u.value));
      break;

    case CPP_NUMBER:
      print_generic_expr (stream, token->u.value, 0);
      break;

    default:
      /* If we have a name for the token, print it out.  Otherwise, we
         simply give the numeric code.  */
      if (token->type < ARRAY_SIZE(token_names))
        fputs (token_names[token->type], stream);
      else
        fprintf (stream, "[%d]", token->type);
      break;
    }
}
/* Start emitting debugging information for LEXER: route trace output
   to stderr and turn the flag on.  */
static void
cp_lexer_start_debugging (cp_lexer* lexer)
{
  cp_lexer_debug_stream = stderr;
  lexer->debugging_p = true;
}
/* Stop emitting debugging information for LEXER: turn the flag off
   and drop the trace stream.  */
static void
cp_lexer_stop_debugging (cp_lexer* lexer)
{
  cp_lexer_debug_stream = NULL;
  lexer->debugging_p = false;
}
/* Create a new cp_token_cache, representing the range of tokens
   [FIRST, LAST).  The cache does not own the tokens it points at.  */
static cp_token_cache *
cp_token_cache_new (cp_token *first, cp_token *last)
{
  cp_token_cache *cache = ggc_alloc_cp_token_cache ();

  cache->last = last;
  cache->first = first;
  return cache;
}
/* Decl-specifiers.  */

/* Reset *DECL_SPECS so that it represents an empty
   decl-specifier-seq.  */
static void
clear_decl_specs (cp_decl_specifier_seq *decl_specs)
{
  memset (decl_specs, 0, sizeof (*decl_specs));
}
/* Declarators.  */

/* Nothing other than the parser should be creating declarators;
   declarators are a semi-syntactic representation of C++ entities.
   Other parts of the front end that need to create entities (like
   VAR_DECLs or FUNCTION_DECLs) should do that directly.  */

/* Constructors for the various declarator kinds; defined below.  */
static cp_declarator *make_call_declarator
  (cp_declarator *, tree, cp_cv_quals, cp_virt_specifiers, tree, tree);
static cp_declarator *make_array_declarator
  (cp_declarator *, tree);
static cp_declarator *make_pointer_declarator
  (cp_cv_quals, cp_declarator *);
static cp_declarator *make_reference_declarator
  (cp_cv_quals, cp_declarator *, bool);
static cp_parameter_declarator *make_parameter_declarator
  (cp_decl_specifier_seq *, cp_declarator *, tree);
static cp_declarator *make_ptrmem_declarator
  (cp_cv_quals, tree, cp_declarator *);

/* An erroneous declarator.  */
static cp_declarator *cp_error_declarator;

/* The obstack on which declarators and related data structures are
   allocated.  */
static struct obstack declarator_obstack;
/* Allocate BYTES of storage from the declarator memory pool
   (declarator_obstack) and return a pointer to it.  */
static inline void *
alloc_declarator (size_t bytes)
{
  void *mem = obstack_alloc (&declarator_obstack, bytes);
  return mem;
}
/* Allocate a declarator of the indicated KIND from the declarator
   obstack and initialize the fields common to all declarator kinds.  */
static cp_declarator *
make_declarator (cp_declarator_kind kind)
{
  cp_declarator *d
    = (cp_declarator *) alloc_declarator (sizeof (cp_declarator));

  d->kind = kind;
  d->parameter_pack_p = false;
  d->id_loc = UNKNOWN_LOCATION;
  d->attributes = NULL_TREE;
  d->declarator = NULL;
  return d;
}
/* Make a declarator for a generalized identifier.  If
   QUALIFYING_SCOPE is non-NULL, the identifier is
   QUALIFYING_SCOPE::UNQUALIFIED_NAME; otherwise, it is just
   UNQUALIFIED_NAME.  SFK indicates the kind of special function this
   is, if any.  */
static cp_declarator *
make_id_declarator (tree qualifying_scope, tree unqualified_name,
                    special_function_kind sfk)
{
  cp_declarator *declarator;

  /* It is valid to write:

       class C { void f(); };
       typedef C D;
       void D::f();

     The standard is not clear about whether `typedef const C D' is
     legal; as of 2002-09-15 the committee is considering that
     question.  EDG 3.0 allows that syntax.  Therefore, we do as
     well.  */
  if (qualifying_scope && TYPE_P (qualifying_scope))
    qualifying_scope = TYPE_MAIN_VARIANT (qualifying_scope);

  /* The name must be an identifier, a destructor name (BIT_NOT_EXPR),
     or a template-id.  */
  gcc_assert (TREE_CODE (unqualified_name) == IDENTIFIER_NODE
              || TREE_CODE (unqualified_name) == BIT_NOT_EXPR
              || TREE_CODE (unqualified_name) == TEMPLATE_ID_EXPR);

  declarator = make_declarator (cdk_id);
  declarator->u.id.qualifying_scope = qualifying_scope;
  declarator->u.id.unqualified_name = unqualified_name;
  declarator->u.id.sfk = sfk;

  return declarator;
}
/* Make a declarator for a pointer to TARGET.  CV_QUALIFIERS is a list
   of modifiers such as const or volatile to apply to the pointer
   type, represented as identifiers.  */
cp_declarator *
make_pointer_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target)
{
  cp_declarator *declarator = make_declarator (cdk_pointer);

  declarator->declarator = target;
  declarator->u.pointer.qualifiers = cv_qualifiers;
  declarator->u.pointer.class_type = NULL_TREE;

  declarator->parameter_pack_p = false;
  if (target)
    {
      declarator->id_loc = target->id_loc;
      /* The pack-ness migrates from the inner declarator to this
         outer one.  */
      declarator->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }
  return declarator;
}
/* Like make_pointer_declarator -- but for references.  RVALUE_REF is
   true for an rvalue reference (&&), false for an lvalue one (&).  */
cp_declarator *
make_reference_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target,
                           bool rvalue_ref)
{
  cp_declarator *declarator = make_declarator (cdk_reference);

  declarator->declarator = target;
  declarator->u.reference.qualifiers = cv_qualifiers;
  declarator->u.reference.rvalue_ref = rvalue_ref;

  declarator->parameter_pack_p = false;
  if (target)
    {
      declarator->id_loc = target->id_loc;
      /* The pack-ness migrates from the inner declarator to this
         outer one.  */
      declarator->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }
  return declarator;
}
/* Like make_pointer_declarator -- but for a pointer to a non-static
   member of CLASS_TYPE.  (Note: unlike the pointer and reference
   variants, this one does not propagate id_loc from the pointee.)  */
cp_declarator *
make_ptrmem_declarator (cp_cv_quals cv_qualifiers, tree class_type,
                        cp_declarator *pointee)
{
  cp_declarator *declarator = make_declarator (cdk_ptrmem);

  declarator->declarator = pointee;
  declarator->u.pointer.qualifiers = cv_qualifiers;
  declarator->u.pointer.class_type = class_type;

  declarator->parameter_pack_p = false;
  if (pointee)
    {
      /* The pack-ness migrates from the inner declarator to this
         outer one.  */
      declarator->parameter_pack_p = pointee->parameter_pack_p;
      pointee->parameter_pack_p = false;
    }
  return declarator;
}
/* Make a declarator for the function given by TARGET, with the
   indicated PARMS.  The CV_QUALIFIERS apply to the function, as in a
   "const"-qualified member function.  The EXCEPTION_SPECIFICATION
   indicates what exceptions can be thrown.  */
cp_declarator *
make_call_declarator (cp_declarator *target,
                      tree parms,
                      cp_cv_quals cv_qualifiers,
                      cp_virt_specifiers virt_specifiers,
                      tree exception_specification,
                      tree late_return_type)
{
  cp_declarator *declarator = make_declarator (cdk_function);

  declarator->declarator = target;
  declarator->u.function.parameters = parms;
  declarator->u.function.qualifiers = cv_qualifiers;
  declarator->u.function.virt_specifiers = virt_specifiers;
  declarator->u.function.exception_specification = exception_specification;
  declarator->u.function.late_return_type = late_return_type;

  declarator->parameter_pack_p = false;
  if (target)
    {
      /* Inherit the location and pack-ness of the inner declarator.  */
      declarator->id_loc = target->id_loc;
      declarator->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }
  return declarator;
}
/* Make a declarator for an array of BOUNDS elements, each of which is
   defined by ELEMENT.  */
cp_declarator *
make_array_declarator (cp_declarator *element, tree bounds)
{
  cp_declarator *declarator = make_declarator (cdk_array);

  declarator->declarator = element;
  declarator->u.array.bounds = bounds;

  declarator->parameter_pack_p = false;
  if (element)
    {
      /* Inherit the location and pack-ness of the inner declarator.  */
      declarator->id_loc = element->id_loc;
      declarator->parameter_pack_p = element->parameter_pack_p;
      element->parameter_pack_p = false;
    }
  return declarator;
}
/* Determine whether the declarator we've seen so far can be a
   parameter pack, when followed by an ellipsis.  Walk inward through
   the declarator chain looking for a declarator name, or any other
   declarator that goes after the point where the ellipsis could
   appear in a parameter pack; finding one means this declarator can
   not be made into a parameter pack.  */
static bool
declarator_can_be_parameter_pack (cp_declarator *declarator)
{
  for (; declarator; declarator = declarator->declarator)
    {
      switch ((int)declarator->kind)
        {
        case cdk_id:
        case cdk_array:
          /* A name or array declarator blocks pack formation.  */
          return false;

        case cdk_error:
          /* An erroneous declarator is treated permissively.  */
          return true;

        default:
          /* Keep descending through pointer/reference/function
             declarators.  */
          break;
        }
    }
  /* Ran out of declarators without finding a blocker.  */
  return true;
}
/* Shared declarator object used to represent an empty parameter
   list; initialized elsewhere.  */
cp_parameter_declarator *no_parameters;

/* Create a parameter declarator with the indicated DECL_SPECIFIERS,
   DECLARATOR and DEFAULT_ARGUMENT.  When DECL_SPECIFIERS is NULL the
   parameter's decl-specifier-seq is cleared to empty.  */
cp_parameter_declarator *
make_parameter_declarator (cp_decl_specifier_seq *decl_specifiers,
                           cp_declarator *declarator,
                           tree default_argument)
{
  cp_parameter_declarator *parameter;

  parameter = ((cp_parameter_declarator *)
               alloc_declarator (sizeof (cp_parameter_declarator)));
  parameter->next = NULL;
  if (decl_specifiers)
    parameter->decl_specifiers = *decl_specifiers;
  else
    /* Fix: the argument here was mojibake ("¶meter"), a mangled
       encoding of "&parameter", which does not compile.  */
    clear_decl_specs (&parameter->decl_specifiers);
  parameter->declarator = declarator;
  parameter->default_argument = default_argument;
  parameter->ellipsis_p = false;

  return parameter;
}
/* Returns true iff DECLARATOR is a declaration for a function: walk
   inward until a function declarator whose inner declarator is an id
   is found, stopping early at a bare id or an erroneous declarator.  */
static bool
function_declarator_p (const cp_declarator *declarator)
{
  for (; declarator; declarator = declarator->declarator)
    {
      if (declarator->kind == cdk_function
          && declarator->declarator->kind == cdk_id)
        return true;

      if (declarator->kind == cdk_id
          || declarator->kind == cdk_error)
        return false;
    }
  return false;
}
/* The parser. */
/* Overview
--------
A cp_parser parses the token stream as specified by the C++
grammar. Its job is purely parsing, not semantic analysis. For
example, the parser breaks the token stream into declarators,
expressions, statements, and other similar syntactic constructs.
It does not check that the types of the expressions on either side
of an assignment-statement are compatible, or that a function is
not declared with a parameter of type `void'.
The parser invokes routines elsewhere in the compiler to perform
semantic analysis and to build up the abstract syntax tree for the
code processed.
The parser (and the template instantiation code, which is, in a
way, a close relative of parsing) are the only parts of the
compiler that should be calling push_scope and pop_scope, or
related functions. The parser (and template instantiation code)
keeps track of what scope is presently active; everything else
should simply honor that. (The code that generates static
initializers may also need to set the scope, in order to check
access control correctly when emitting the initializers.)
Methodology
-----------
The parser is of the standard recursive-descent variety. Upcoming
tokens in the token stream are examined in order to determine which
production to use when parsing a non-terminal. Some C++ constructs
require arbitrary look ahead to disambiguate. For example, it is
impossible, in the general case, to tell whether a statement is an
expression or declaration without scanning the entire statement.
Therefore, the parser is capable of "parsing tentatively." When the
parser is not sure what construct comes next, it enters this mode.
Then, while we attempt to parse the construct, the parser queues up
error messages, rather than issuing them immediately, and saves the
tokens it consumes. If the construct is parsed successfully, the
parser "commits", i.e., it issues any queued error messages and
the tokens that were being preserved are permanently discarded.
If, however, the construct is not parsed successfully, the parser
rolls back its state completely so that it can resume parsing using
a different alternative.
Future Improvements
-------------------
The performance of the parser could probably be improved substantially.
We could often eliminate the need to parse tentatively by looking ahead
a little bit. In some places, this approach might not entirely eliminate
the need to parse tentatively, but it might still speed up the average
case. */
/* Flags that are passed to some parsing functions.  These values can
   be bitwise-ored together.  */

enum
{
  /* No flags.  */
  CP_PARSER_FLAGS_NONE = 0x0,
  /* The construct is optional.  If it is not present, then no error
     should be issued.  */
  CP_PARSER_FLAGS_OPTIONAL = 0x1,
  /* When parsing a type-specifier, treat user-defined type-names
     as non-type identifiers.  */
  CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES = 0x2,
  /* When parsing a type-specifier, do not try to parse a class-specifier
     or enum-specifier.  */
  CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS = 0x4,
  /* When parsing a decl-specifier-seq, only allow type-specifier or
     constexpr.  */
  CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR = 0x8
};

/* This type is used for parameters and variables which hold
   combinations of the above flags.  Plain int (rather than the
   anonymous enum above) so that flag values can be or'd together
   without casts.  */

typedef int cp_parser_flags;
/* The different kinds of declarators we want to parse.  Passed to
   cp_parser_declarator and cp_parser_direct_declarator to indicate
   which declarator forms are acceptable.  */

typedef enum cp_parser_declarator_kind
{
  /* We want an abstract declarator.  */
  CP_PARSER_DECLARATOR_ABSTRACT,
  /* We want a named declarator.  */
  CP_PARSER_DECLARATOR_NAMED,
  /* We don't mind, but the name must be an unqualified-id.  */
  CP_PARSER_DECLARATOR_EITHER
} cp_parser_declarator_kind;
/* The precedence values used to parse binary expressions. The minimum value
of PREC must be 1, because zero is reserved to quickly discriminate
binary operators from other tokens. */
enum cp_parser_prec
{
PREC_NOT_OPERATOR,
PREC_LOGICAL_OR_EXPRESSION,
PREC_LOGICAL_AND_EXPRESSION,
PREC_INCLUSIVE_OR_EXPRESSION,
PREC_EXCLUSIVE_OR_EXPRESSION,
PREC_AND_EXPRESSION,
PREC_EQUALITY_EXPRESSION,
PREC_RELATIONAL_EXPRESSION,
PREC_SHIFT_EXPRESSION,
PREC_ADDITIVE_EXPRESSION,
PREC_MULTIPLICATIVE_EXPRESSION,
PREC_PM_EXPRESSION,
NUM_PREC_VALUES = PREC_PM_EXPRESSION
};
/* A mapping from a token type to a corresponding tree node type, with a
   precedence value.  Rows of this type make up the binops[] table
   below.  */

typedef struct cp_parser_binary_operations_map_node
{
  /* The token type.  */
  enum cpp_ttype token_type;
  /* The corresponding tree code.  */
  enum tree_code tree_type;
  /* The precedence of this operator.  */
  enum cp_parser_prec prec;
} cp_parser_binary_operations_map_node;
/* One entry of the stack used while parsing a binary expression.  */
typedef struct cp_parser_expression_stack_entry
{
  /* Left hand side of the binary operation we are currently
     parsing.  */
  tree lhs;
  /* Original tree code for left hand side, if it was a binary
     expression itself (used for -Wparentheses).  */
  enum tree_code lhs_type;
  /* Tree code for the binary operation we are parsing.  */
  enum tree_code tree_type;
  /* Precedence of the binary operation we are parsing.  */
  enum cp_parser_prec prec;
} cp_parser_expression_stack_entry;

/* The stack for storing partial expressions.  We only need NUM_PREC_VALUES
   entries because precedence levels on the stack are monotonically
   increasing.  */
typedef struct cp_parser_expression_stack_entry
  cp_parser_expression_stack[NUM_PREC_VALUES];
/* Prototypes. */
/* Constructors and destructors. */
static cp_parser_context *cp_parser_context_new
(cp_parser_context *);
/* Class variables.  */

/* A free list of previously-allocated parser contexts, threaded
   through their NEXT fields (see cp_parser_context_new).  The
   GTY((deletable)) marker lets the garbage collector discard the
   list at collection time.  */
static GTY((deletable)) cp_parser_context* cp_parser_context_free_list;
/* The operator-precedence table used by cp_parser_binary_expression.
   Transformed into an associative array (binops_by_token) by
   cp_parser_new.  Each row maps a preprocessor token type to the tree
   code built for that operator and the precedence at which it
   binds.  */

static const cp_parser_binary_operations_map_node binops[] = {
  { CPP_DEREF_STAR, MEMBER_REF, PREC_PM_EXPRESSION },
  { CPP_DOT_STAR, DOTSTAR_EXPR, PREC_PM_EXPRESSION },

  { CPP_MULT, MULT_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_DIV, TRUNC_DIV_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_MOD, TRUNC_MOD_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },

  { CPP_PLUS, PLUS_EXPR, PREC_ADDITIVE_EXPRESSION },
  { CPP_MINUS, MINUS_EXPR, PREC_ADDITIVE_EXPRESSION },

  { CPP_LSHIFT, LSHIFT_EXPR, PREC_SHIFT_EXPRESSION },
  { CPP_RSHIFT, RSHIFT_EXPR, PREC_SHIFT_EXPRESSION },

  { CPP_LESS, LT_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_GREATER, GT_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_LESS_EQ, LE_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_GREATER_EQ, GE_EXPR, PREC_RELATIONAL_EXPRESSION },

  { CPP_EQ_EQ, EQ_EXPR, PREC_EQUALITY_EXPRESSION },
  { CPP_NOT_EQ, NE_EXPR, PREC_EQUALITY_EXPRESSION },

  { CPP_AND, BIT_AND_EXPR, PREC_AND_EXPRESSION },

  { CPP_XOR, BIT_XOR_EXPR, PREC_EXCLUSIVE_OR_EXPRESSION },

  { CPP_OR, BIT_IOR_EXPR, PREC_INCLUSIVE_OR_EXPRESSION },

  { CPP_AND_AND, TRUTH_ANDIF_EXPR, PREC_LOGICAL_AND_EXPRESSION },

  { CPP_OR_OR, TRUTH_ORIF_EXPR, PREC_LOGICAL_OR_EXPRESSION }
};

/* The same as binops, but initialized by cp_parser_new so that
   binops_by_token[N].token_type == N.  Used in cp_parser_binary_expression
   for speed.  */
static cp_parser_binary_operations_map_node binops_by_token[N_CP_TTYPES];
/* Constructors and destructors. */
/* Construct a new context.  The context below this one on the stack
   is given by NEXT.  Storage is recycled from
   cp_parser_context_free_list when possible, otherwise GC-allocated.  */

static cp_parser_context *
cp_parser_context_new (cp_parser_context* next)
{
  cp_parser_context *context;

  /* Allocate the storage.  */
  if (cp_parser_context_free_list != NULL)
    {
      /* Pull the first entry from the free list.  */
      context = cp_parser_context_free_list;
      cp_parser_context_free_list = context->next;
      /* Recycled storage contains stale data; zero it.  (The GC
	 allocator below already returns cleared memory.)  */
      memset (context, 0, sizeof (*context));
    }
  else
    context = ggc_alloc_cleared_cp_parser_context ();

  /* No errors have occurred yet in this context.  */
  context->status = CP_PARSER_STATUS_KIND_NO_ERROR;
  /* If this is not the bottommost context, copy information that we
     need from the previous context.  */
  if (next)
    {
      /* If, in the NEXT context, we are parsing an `x->' or `x.'
	 expression, then we are parsing one in this context, too.  */
      context->object_type = next->object_type;

      /* Thread the stack.  */
      context->next = next;
    }

  return context;
}
/* Managing the unparsed function queues.  */

/* Accessors for the members of the innermost (most recently pushed)
   entry of PARSER->UNPARSED_QUEUES.  These expand in a scope where a
   variable named `parser' is visible.  */

#define unparsed_funs_with_default_args \
  VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues)->funs_with_default_args
#define unparsed_funs_with_definitions \
  VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues)->funs_with_definitions
#define unparsed_nsdmis \
  VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues)->nsdmis
/* Push a fresh set of unparsed-function queues onto
   PARSER->UNPARSED_QUEUES and initialize its members.  */

static void
push_unparsed_function_queues (cp_parser *parser)
{
  /* Push a placeholder entry first; the unparsed_* accessor macros
     below refer to the innermost entry, so it must exist before they
     are used.  */
  VEC_safe_push (cp_unparsed_functions_entry, gc,
		 parser->unparsed_queues, NULL);
  unparsed_funs_with_default_args = NULL;
  unparsed_funs_with_definitions = make_tree_vector ();
  unparsed_nsdmis = NULL;
}
/* Pop the innermost set of unparsed-function queues, releasing the
   tree vector of definitions it holds.  */

static void
pop_unparsed_function_queues (cp_parser *parser)
{
  release_tree_vector (unparsed_funs_with_definitions);
  VEC_pop (cp_unparsed_functions_entry, parser->unparsed_queues);
}
/* Prototypes. */
/* Constructors and destructors. */
static cp_parser *cp_parser_new
(void);
/* Routines to parse various constructs.
Those that return `tree' will return the error_mark_node (rather
than NULL_TREE) if a parse error occurs, unless otherwise noted.
Sometimes, they will return an ordinary node if error-recovery was
attempted, even though a parse error occurred. So, to check
whether or not a parse error occurred, you should always use
cp_parser_error_occurred. If the construct is optional (indicated
either by an `_opt' in the name of the function that does the
parsing or via a FLAGS parameter), then NULL_TREE is returned if
the construct is not present. */
/* Lexical conventions [gram.lex] */
static tree cp_parser_identifier
(cp_parser *);
static tree cp_parser_string_literal
(cp_parser *, bool, bool);
static tree cp_parser_userdef_char_literal
(cp_parser *);
static tree cp_parser_userdef_string_literal
(cp_token *);
static tree cp_parser_userdef_numeric_literal
(cp_parser *);
/* Basic concepts [gram.basic] */
static bool cp_parser_translation_unit
(cp_parser *);
/* Expressions [gram.expr] */
static tree cp_parser_primary_expression
(cp_parser *, bool, bool, bool, cp_id_kind *);
static tree cp_parser_id_expression
(cp_parser *, bool, bool, bool *, bool, bool);
static tree cp_parser_unqualified_id
(cp_parser *, bool, bool, bool, bool);
static tree cp_parser_nested_name_specifier_opt
(cp_parser *, bool, bool, bool, bool);
static tree cp_parser_nested_name_specifier
(cp_parser *, bool, bool, bool, bool);
static tree cp_parser_qualifying_entity
(cp_parser *, bool, bool, bool, bool, bool);
static tree cp_parser_postfix_expression
(cp_parser *, bool, bool, bool, cp_id_kind *);
static tree cp_parser_postfix_open_square_expression
(cp_parser *, tree, bool);
static tree cp_parser_postfix_dot_deref_expression
(cp_parser *, enum cpp_ttype, tree, bool, cp_id_kind *, location_t);
static VEC(tree,gc) *cp_parser_parenthesized_expression_list
(cp_parser *, int, bool, bool, bool *);
/* Values for the second parameter of cp_parser_parenthesized_expression_list. */
enum { non_attr = 0, normal_attr = 1, id_attr = 2 };
static void cp_parser_pseudo_destructor_name
(cp_parser *, tree *, tree *);
static tree cp_parser_unary_expression
(cp_parser *, bool, bool, cp_id_kind *);
static enum tree_code cp_parser_unary_operator
(cp_token *);
static tree cp_parser_new_expression
(cp_parser *);
static VEC(tree,gc) *cp_parser_new_placement
(cp_parser *);
static tree cp_parser_new_type_id
(cp_parser *, tree *);
static cp_declarator *cp_parser_new_declarator_opt
(cp_parser *);
static cp_declarator *cp_parser_direct_new_declarator
(cp_parser *);
static VEC(tree,gc) *cp_parser_new_initializer
(cp_parser *);
static tree cp_parser_delete_expression
(cp_parser *);
static tree cp_parser_cast_expression
(cp_parser *, bool, bool, cp_id_kind *);
static tree cp_parser_binary_expression
(cp_parser *, bool, bool, enum cp_parser_prec, cp_id_kind *);
static tree cp_parser_question_colon_clause
(cp_parser *, tree);
static tree cp_parser_assignment_expression
(cp_parser *, bool, cp_id_kind *);
static enum tree_code cp_parser_assignment_operator_opt
(cp_parser *);
static tree cp_parser_expression
(cp_parser *, bool, cp_id_kind *);
static tree cp_parser_constant_expression
(cp_parser *, bool, bool *);
static tree cp_parser_builtin_offsetof
(cp_parser *);
static tree cp_parser_lambda_expression
(cp_parser *);
static void cp_parser_lambda_introducer
(cp_parser *, tree);
static bool cp_parser_lambda_declarator_opt
(cp_parser *, tree);
static void cp_parser_lambda_body
(cp_parser *, tree);
/* Statements [gram.stmt.stmt] */
static void cp_parser_statement
(cp_parser *, tree, bool, bool *);
static void cp_parser_label_for_labeled_statement
(cp_parser *);
static tree cp_parser_expression_statement
(cp_parser *, tree);
static tree cp_parser_compound_statement
(cp_parser *, tree, bool, bool);
static void cp_parser_statement_seq_opt
(cp_parser *, tree);
static tree cp_parser_selection_statement
(cp_parser *, bool *);
static tree cp_parser_condition
(cp_parser *);
static tree cp_parser_iteration_statement
(cp_parser *);
static bool cp_parser_for_init_statement
(cp_parser *, tree *decl);
static tree cp_parser_for
(cp_parser *);
static tree cp_parser_c_for
(cp_parser *, tree, tree);
static tree cp_parser_range_for
(cp_parser *, tree, tree, tree);
static void do_range_for_auto_deduction
(tree, tree);
static tree cp_parser_perform_range_for_lookup
(tree, tree *, tree *);
static tree cp_parser_range_for_member_function
(tree, tree);
static tree cp_parser_jump_statement
(cp_parser *);
static void cp_parser_declaration_statement
(cp_parser *);
static tree cp_parser_implicitly_scoped_statement
(cp_parser *, bool *);
static void cp_parser_already_scoped_statement
(cp_parser *);
/* Declarations [gram.dcl.dcl] */
static void cp_parser_declaration_seq_opt
(cp_parser *);
static void cp_parser_declaration
(cp_parser *);
static void cp_parser_block_declaration
(cp_parser *, bool);
static void cp_parser_simple_declaration
(cp_parser *, bool, tree *);
static void cp_parser_decl_specifier_seq
(cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, int *);
static tree cp_parser_storage_class_specifier_opt
(cp_parser *);
static tree cp_parser_function_specifier_opt
(cp_parser *, cp_decl_specifier_seq *);
static tree cp_parser_type_specifier
(cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, bool,
int *, bool *);
static tree cp_parser_simple_type_specifier
(cp_parser *, cp_decl_specifier_seq *, cp_parser_flags);
static tree cp_parser_type_name
(cp_parser *);
static tree cp_parser_nonclass_name
(cp_parser* parser);
static tree cp_parser_elaborated_type_specifier
(cp_parser *, bool, bool);
static tree cp_parser_enum_specifier
(cp_parser *);
static void cp_parser_enumerator_list
(cp_parser *, tree);
static void cp_parser_enumerator_definition
(cp_parser *, tree);
static tree cp_parser_namespace_name
(cp_parser *);
static void cp_parser_namespace_definition
(cp_parser *);
static void cp_parser_namespace_body
(cp_parser *);
static tree cp_parser_qualified_namespace_specifier
(cp_parser *);
static void cp_parser_namespace_alias_definition
(cp_parser *);
static bool cp_parser_using_declaration
(cp_parser *, bool);
static void cp_parser_using_directive
(cp_parser *);
static tree cp_parser_alias_declaration
(cp_parser *);
static void cp_parser_asm_definition
(cp_parser *);
static void cp_parser_linkage_specification
(cp_parser *);
static void cp_parser_static_assert
(cp_parser *, bool);
static tree cp_parser_decltype
(cp_parser *);
/* Declarators [gram.dcl.decl] */
static tree cp_parser_init_declarator
(cp_parser *, cp_decl_specifier_seq *, VEC (deferred_access_check,gc)*, bool, bool, int, bool *, tree *);
static cp_declarator *cp_parser_declarator
(cp_parser *, cp_parser_declarator_kind, int *, bool *, bool);
static cp_declarator *cp_parser_direct_declarator
(cp_parser *, cp_parser_declarator_kind, int *, bool);
static enum tree_code cp_parser_ptr_operator
(cp_parser *, tree *, cp_cv_quals *);
static cp_cv_quals cp_parser_cv_qualifier_seq_opt
(cp_parser *);
static cp_virt_specifiers cp_parser_virt_specifier_seq_opt
(cp_parser *);
static tree cp_parser_late_return_type_opt
(cp_parser *, cp_cv_quals);
static tree cp_parser_declarator_id
(cp_parser *, bool);
static tree cp_parser_type_id
(cp_parser *);
static tree cp_parser_template_type_arg
(cp_parser *);
static tree cp_parser_trailing_type_id (cp_parser *);
static tree cp_parser_type_id_1
(cp_parser *, bool, bool);
static void cp_parser_type_specifier_seq
(cp_parser *, bool, bool, cp_decl_specifier_seq *);
static tree cp_parser_parameter_declaration_clause
(cp_parser *);
static tree cp_parser_parameter_declaration_list
(cp_parser *, bool *);
static cp_parameter_declarator *cp_parser_parameter_declaration
(cp_parser *, bool, bool *);
static tree cp_parser_default_argument
(cp_parser *, bool);
static void cp_parser_function_body
(cp_parser *);
static tree cp_parser_initializer
(cp_parser *, bool *, bool *);
static tree cp_parser_initializer_clause
(cp_parser *, bool *);
static tree cp_parser_braced_list
(cp_parser*, bool*);
static VEC(constructor_elt,gc) *cp_parser_initializer_list
(cp_parser *, bool *);
static bool cp_parser_ctor_initializer_opt_and_function_body
(cp_parser *);
/* Classes [gram.class] */
static tree cp_parser_class_name
(cp_parser *, bool, bool, enum tag_types, bool, bool, bool);
static tree cp_parser_class_specifier
(cp_parser *);
static tree cp_parser_class_head
(cp_parser *, bool *);
static enum tag_types cp_parser_class_key
(cp_parser *);
static void cp_parser_member_specification_opt
(cp_parser *);
static void cp_parser_member_declaration
(cp_parser *);
static tree cp_parser_pure_specifier
(cp_parser *);
static tree cp_parser_constant_initializer
(cp_parser *);
/* Derived classes [gram.class.derived] */
static tree cp_parser_base_clause
(cp_parser *);
static tree cp_parser_base_specifier
(cp_parser *);
/* Special member functions [gram.special] */
static tree cp_parser_conversion_function_id
(cp_parser *);
static tree cp_parser_conversion_type_id
(cp_parser *);
static cp_declarator *cp_parser_conversion_declarator_opt
(cp_parser *);
static bool cp_parser_ctor_initializer_opt
(cp_parser *);
static void cp_parser_mem_initializer_list
(cp_parser *);
static tree cp_parser_mem_initializer
(cp_parser *);
static tree cp_parser_mem_initializer_id
(cp_parser *);
/* Overloading [gram.over] */
static tree cp_parser_operator_function_id
(cp_parser *);
static tree cp_parser_operator
(cp_parser *);
/* Templates [gram.temp] */
static void cp_parser_template_declaration
(cp_parser *, bool);
static tree cp_parser_template_parameter_list
(cp_parser *);
static tree cp_parser_template_parameter
(cp_parser *, bool *, bool *);
static tree cp_parser_type_parameter
(cp_parser *, bool *);
static tree cp_parser_template_id
(cp_parser *, bool, bool, bool);
static tree cp_parser_template_name
(cp_parser *, bool, bool, bool, bool *);
static tree cp_parser_template_argument_list
(cp_parser *);
static tree cp_parser_template_argument
(cp_parser *);
static void cp_parser_explicit_instantiation
(cp_parser *);
static void cp_parser_explicit_specialization
(cp_parser *);
/* Exception handling [gram.exception] */
static tree cp_parser_try_block
(cp_parser *);
static bool cp_parser_function_try_block
(cp_parser *);
static void cp_parser_handler_seq
(cp_parser *);
static void cp_parser_handler
(cp_parser *);
static tree cp_parser_exception_declaration
(cp_parser *);
static tree cp_parser_throw_expression
(cp_parser *);
static tree cp_parser_exception_specification_opt
(cp_parser *);
static tree cp_parser_type_id_list
(cp_parser *);
/* GNU Extensions */
static tree cp_parser_asm_specification_opt
(cp_parser *);
static tree cp_parser_asm_operand_list
(cp_parser *);
static tree cp_parser_asm_clobber_list
(cp_parser *);
static tree cp_parser_asm_label_list
(cp_parser *);
static tree cp_parser_attributes_opt
(cp_parser *);
static tree cp_parser_attribute_list
(cp_parser *);
static bool cp_parser_extension_opt
(cp_parser *, int *);
static void cp_parser_label_declaration
(cp_parser *);
/* Transactional Memory Extensions */
static tree cp_parser_transaction
(cp_parser *, enum rid);
static tree cp_parser_transaction_expression
(cp_parser *, enum rid);
static bool cp_parser_function_transaction
(cp_parser *, enum rid);
static tree cp_parser_transaction_cancel
(cp_parser *);
enum pragma_context { pragma_external, pragma_stmt, pragma_compound };
static bool cp_parser_pragma
(cp_parser *, enum pragma_context);
/* Objective-C++ Productions */
static tree cp_parser_objc_message_receiver
(cp_parser *);
static tree cp_parser_objc_message_args
(cp_parser *);
static tree cp_parser_objc_message_expression
(cp_parser *);
static tree cp_parser_objc_encode_expression
(cp_parser *);
static tree cp_parser_objc_defs_expression
(cp_parser *);
static tree cp_parser_objc_protocol_expression
(cp_parser *);
static tree cp_parser_objc_selector_expression
(cp_parser *);
static tree cp_parser_objc_expression
(cp_parser *);
static bool cp_parser_objc_selector_p
(enum cpp_ttype);
static tree cp_parser_objc_selector
(cp_parser *);
static tree cp_parser_objc_protocol_refs_opt
(cp_parser *);
static void cp_parser_objc_declaration
(cp_parser *, tree);
static tree cp_parser_objc_statement
(cp_parser *);
static bool cp_parser_objc_valid_prefix_attributes
(cp_parser *, tree *);
static void cp_parser_objc_at_property_declaration
(cp_parser *) ;
static void cp_parser_objc_at_synthesize_declaration
(cp_parser *) ;
static void cp_parser_objc_at_dynamic_declaration
(cp_parser *) ;
static tree cp_parser_objc_struct_declaration
(cp_parser *) ;
/* Utility Routines */
static tree cp_parser_lookup_name
(cp_parser *, tree, enum tag_types, bool, bool, bool, tree *, location_t);
static tree cp_parser_lookup_name_simple
(cp_parser *, tree, location_t);
static tree cp_parser_maybe_treat_template_as_class
(tree, bool);
static bool cp_parser_check_declarator_template_parameters
(cp_parser *, cp_declarator *, location_t);
static bool cp_parser_check_template_parameters
(cp_parser *, unsigned, location_t, cp_declarator *);
static tree cp_parser_simple_cast_expression
(cp_parser *);
static tree cp_parser_global_scope_opt
(cp_parser *, bool);
static bool cp_parser_constructor_declarator_p
(cp_parser *, bool);
static tree cp_parser_function_definition_from_specifiers_and_declarator
(cp_parser *, cp_decl_specifier_seq *, tree, const cp_declarator *);
static tree cp_parser_function_definition_after_declarator
(cp_parser *, bool);
static void cp_parser_template_declaration_after_export
(cp_parser *, bool);
static void cp_parser_perform_template_parameter_access_checks
(VEC (deferred_access_check,gc)*);
static tree cp_parser_single_declaration
(cp_parser *, VEC (deferred_access_check,gc)*, bool, bool, bool *);
static tree cp_parser_functional_cast
(cp_parser *, tree);
static tree cp_parser_save_member_function_body
(cp_parser *, cp_decl_specifier_seq *, cp_declarator *, tree);
static tree cp_parser_save_nsdmi
(cp_parser *);
static tree cp_parser_enclosed_template_argument_list
(cp_parser *);
static void cp_parser_save_default_args
(cp_parser *, tree);
static void cp_parser_late_parsing_for_member
(cp_parser *, tree);
static tree cp_parser_late_parse_one_default_arg
(cp_parser *, tree, tree, tree);
static void cp_parser_late_parsing_nsdmi
(cp_parser *, tree);
static void cp_parser_late_parsing_default_args
(cp_parser *, tree);
static tree cp_parser_sizeof_operand
(cp_parser *, enum rid);
static tree cp_parser_trait_expr
(cp_parser *, enum rid);
static bool cp_parser_declares_only_class_p
(cp_parser *);
static void cp_parser_set_storage_class
(cp_parser *, cp_decl_specifier_seq *, enum rid, location_t);
static void cp_parser_set_decl_spec_type
(cp_decl_specifier_seq *, tree, location_t, bool);
static bool cp_parser_friend_p
(const cp_decl_specifier_seq *);
static void cp_parser_required_error
(cp_parser *, required_token, bool);
static cp_token *cp_parser_require
(cp_parser *, enum cpp_ttype, required_token);
static cp_token *cp_parser_require_keyword
(cp_parser *, enum rid, required_token);
static bool cp_parser_token_starts_function_definition_p
(cp_token *);
static bool cp_parser_next_token_starts_class_definition_p
(cp_parser *);
static bool cp_parser_next_token_ends_template_argument_p
(cp_parser *);
static bool cp_parser_nth_token_starts_template_argument_list_p
(cp_parser *, size_t);
static enum tag_types cp_parser_token_is_class_key
(cp_token *);
static void cp_parser_check_class_key
(enum tag_types, tree type);
static void cp_parser_check_access_in_redeclaration
(tree type, location_t location);
static bool cp_parser_optional_template_keyword
(cp_parser *);
static void cp_parser_pre_parsed_nested_name_specifier
(cp_parser *);
static bool cp_parser_cache_group
(cp_parser *, enum cpp_ttype, unsigned);
static tree cp_parser_cache_defarg
(cp_parser *parser, bool nsdmi);
static void cp_parser_parse_tentatively
(cp_parser *);
static void cp_parser_commit_to_tentative_parse
(cp_parser *);
static void cp_parser_abort_tentative_parse
(cp_parser *);
static bool cp_parser_parse_definitely
(cp_parser *);
static inline bool cp_parser_parsing_tentatively
(cp_parser *);
static bool cp_parser_uncommitted_to_tentative_parse_p
(cp_parser *);
static void cp_parser_error
(cp_parser *, const char *);
static void cp_parser_name_lookup_error
(cp_parser *, tree, tree, name_lookup_error, location_t);
static bool cp_parser_simulate_error
(cp_parser *);
static bool cp_parser_check_type_definition
(cp_parser *);
static void cp_parser_check_for_definition_in_return_type
(cp_declarator *, tree, location_t type_location);
static void cp_parser_check_for_invalid_template_id
(cp_parser *, tree, location_t location);
static bool cp_parser_non_integral_constant_expression
(cp_parser *, non_integral_constant);
static void cp_parser_diagnose_invalid_type_name
(cp_parser *, tree, tree, location_t);
static bool cp_parser_parse_and_diagnose_invalid_type_name
(cp_parser *);
static int cp_parser_skip_to_closing_parenthesis
(cp_parser *, bool, bool, bool);
static void cp_parser_skip_to_end_of_statement
(cp_parser *);
static void cp_parser_consume_semicolon_at_end_of_statement
(cp_parser *);
static void cp_parser_skip_to_end_of_block_or_statement
(cp_parser *);
static bool cp_parser_skip_to_closing_brace
(cp_parser *);
static void cp_parser_skip_to_end_of_template_parameter_list
(cp_parser *);
static void cp_parser_skip_to_pragma_eol
(cp_parser*, cp_token *);
static bool cp_parser_error_occurred
(cp_parser *);
static bool cp_parser_allow_gnu_extensions_p
(cp_parser *);
static bool cp_parser_is_pure_string_literal
(cp_token *);
static bool cp_parser_is_string_literal
(cp_token *);
static bool cp_parser_is_keyword
(cp_token *, enum rid);
static tree cp_parser_make_typename_type
(cp_parser *, tree, tree, location_t location);
static cp_declarator * cp_parser_make_indirect_declarator
(enum tree_code, tree, cp_cv_quals, cp_declarator *);
/* Returns nonzero if we are parsing tentatively.  */

static inline bool
cp_parser_parsing_tentatively (cp_parser* parser)
{
  /* There is more than one context on the stack exactly when a
     tentative parse is in progress.  */
  if (parser->context->next)
    return true;
  return false;
}
/* Returns nonzero if TOKEN is a string literal.  */

static bool
cp_parser_is_pure_string_literal (cp_token* token)
{
  /* Any of the narrow, UTF-16/32, wide, or UTF-8 string token types
     counts.  */
  switch (token->type)
    {
    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      return true;
    default:
      return false;
    }
}
/* Returns nonzero if TOKEN is a string literal or a user-defined
   string literal.  */

static bool
cp_parser_is_string_literal (cp_token* token)
{
  /* Accept the plain string-literal token types...  */
  if (cp_parser_is_pure_string_literal (token))
    return true;

  /* ...and their user-defined-literal counterparts.  */
  switch (token->type)
    {
    case CPP_STRING_USERDEF:
    case CPP_STRING16_USERDEF:
    case CPP_STRING32_USERDEF:
    case CPP_WSTRING_USERDEF:
    case CPP_UTF8STRING_USERDEF:
      return true;
    default:
      return false;
    }
}
/* Returns nonzero if TOKEN is the indicated KEYWORD.  */

static bool
cp_parser_is_keyword (cp_token* token, enum rid keyword)
{
  /* The lexer records the keyword code on the token itself.  */
  return keyword == token->keyword;
}
/* If not parsing tentatively, issue a diagnostic of the form
      FILE:LINE: MESSAGE before TOKEN
   where TOKEN is the next token in the input stream.  MESSAGE
   (specified by the caller) is usually of the form "expected
   OTHER-TOKEN".  */

static void
cp_parser_error (cp_parser* parser, const char* gmsgid)
{
  cp_token *token;

  /* During a tentative parse, merely record that an error occurred.  */
  if (cp_parser_simulate_error (parser))
    return;

  token = cp_lexer_peek_token (parser->lexer);
  /* This diagnostic makes more sense if it is tagged to the line
     of the token we just peeked at.  */
  cp_lexer_set_source_position_from_token (token);

  /* A pragma in an unexpected position gets its own message, and the
     rest of the pragma is discarded.  */
  if (token->type == CPP_PRAGMA)
    {
      error_at (token->location,
		"%<#pragma%> is not allowed here");
      cp_parser_skip_to_pragma_eol (parser, token);
      return;
    }

  c_parse_error (gmsgid,
		 /* Because c_parser_error does not understand
		    CPP_KEYWORD, keywords are treated like
		    identifiers.  */
		 (token->type == CPP_KEYWORD ? CPP_NAME : token->type),
		 token->u.value, token->flags);
}
/* Issue an error about name-lookup failing.  NAME is the
   IDENTIFIER_NODE; DECL is the result of
   the lookup (as returned from cp_parser_lookup_name).  DESIRED is
   the thing that we hoped to find.  LOCATION positions the
   diagnostic.  */

static void
cp_parser_name_lookup_error (cp_parser* parser,
			     tree name,
			     tree decl,
			     name_lookup_error desired,
			     location_t location)
{
  /* If name lookup completely failed, tell the user that NAME was not
     declared.  */
  if (decl == error_mark_node)
    {
      /* Qualify the message by how the lookup was performed: an
	 explicit scope, the global namespace, an object (`x.'/`x->')
	 scope, or unqualified.  */
      if (parser->scope && parser->scope != global_namespace)
	error_at (location, "%<%E::%E%> has not been declared",
		  parser->scope, name);
      else if (parser->scope == global_namespace)
	error_at (location, "%<::%E%> has not been declared", name);
      else if (parser->object_scope
	       && !CLASS_TYPE_P (parser->object_scope))
	error_at (location, "request for member %qE in non-class type %qT",
		  name, parser->object_scope);
      else if (parser->object_scope)
	error_at (location, "%<%T::%E%> has not been declared",
		  parser->object_scope, name);
      else
	error_at (location, "%qE has not been declared", name);
    }
  /* Otherwise something was found, but not the DESIRED kind of
     entity.  The three branches below differ only in how the name is
     qualified in the message.  */
  else if (parser->scope && parser->scope != global_namespace)
    {
      switch (desired)
	{
	case NLE_TYPE:
	  error_at (location, "%<%E::%E%> is not a type",
		    parser->scope, name);
	  break;
	case NLE_CXX98:
	  error_at (location, "%<%E::%E%> is not a class or namespace",
		    parser->scope, name);
	  break;
	case NLE_NOT_CXX98:
	  error_at (location,
		    "%<%E::%E%> is not a class, namespace, or enumeration",
		    parser->scope, name);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else if (parser->scope == global_namespace)
    {
      switch (desired)
	{
	case NLE_TYPE:
	  error_at (location, "%<::%E%> is not a type", name);
	  break;
	case NLE_CXX98:
	  error_at (location, "%<::%E%> is not a class or namespace", name);
	  break;
	case NLE_NOT_CXX98:
	  error_at (location,
		    "%<::%E%> is not a class, namespace, or enumeration",
		    name);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (desired)
	{
	case NLE_TYPE:
	  error_at (location, "%qE is not a type", name);
	  break;
	case NLE_CXX98:
	  error_at (location, "%qE is not a class or namespace", name);
	  break;
	case NLE_NOT_CXX98:
	  error_at (location,
		    "%qE is not a class, namespace, or enumeration", name);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
}
/* If we are parsing tentatively, remember that an error has occurred
   during this tentative parse.  Returns true if the error was
   simulated; false if a message should be issued by the caller.  */

static bool
cp_parser_simulate_error (cp_parser* parser)
{
  /* Outside an uncommitted tentative parse, the caller must issue the
     diagnostic itself.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
    return false;

  /* Record the error against the current tentative context.  */
  parser->context->status = CP_PARSER_STATUS_KIND_ERROR;
  return true;
}
/* Check for repeated decl-specifiers.  DECL_SPECS holds the count of
   each specifier seen; LOCATION positions any diagnostic.  */

static void
cp_parser_check_decl_spec (cp_decl_specifier_seq *decl_specs,
			   location_t location)
{
  int ds;

  for (ds = ds_first; ds != ds_last; ++ds)
    {
      unsigned count = decl_specs->specs[ds];
      if (count < 2)
	continue;
      /* The "long" specifier is a special case because of "long long".  */
      if (ds == ds_long)
	{
	  if (count > 2)
	    error_at (location, "%<long long long%> is too long for GCC");
	  else
	    pedwarn_cxx98 (location, OPT_Wlong_long,
			   "ISO C++ 1998 does not support %<long long%>");
	}
      /* COUNT is necessarily >= 2 here (see the `continue' above), so
	 this branch is always taken for any other repeated
	 specifier.  */
      else if (count > 1)
	{
	  /* Indexed by DS.  NOTE(review): the order of these names
	     must be kept in sync with the ds_* enumeration (declared
	     elsewhere) -- verify when editing either.  */
	  static const char *const decl_spec_names[] = {
	    "signed",
	    "unsigned",
	    "short",
	    "long",
	    "const",
	    "volatile",
	    "restrict",
	    "inline",
	    "virtual",
	    "explicit",
	    "friend",
	    "typedef",
	    "using",
            "constexpr",
	    "__complex",
	    "__thread"
	  };
	  error_at (location, "duplicate %qs", decl_spec_names[ds]);
	}
    }
}
/* This function is called when a type is defined.  If type
   definitions are forbidden at this point, an error message is
   issued.  Returns true if the definition is allowed here.  */

static bool
cp_parser_check_type_definition (cp_parser* parser)
{
  const char *forbidden = parser->type_definition_forbidden_message;

  /* No restriction is currently in force.  */
  if (!forbidden)
    return true;

  /* Don't use `%s' to print the string, because quotations (`%<', `%>')
     in the message need to be interpreted.  */
  error (forbidden);
  return false;
}
/* This function is called when the DECLARATOR is processed.  The TYPE
   was a type defined in the decl-specifiers.  If it is invalid to
   define a type in the decl-specifiers for DECLARATOR, an error is
   issued.  TYPE_LOCATION is the location of TYPE and is used
   for error reporting.  */

static void
cp_parser_check_for_definition_in_return_type (cp_declarator *declarator,
					       tree type, location_t type_location)
{
  cp_declarator *d = declarator;

  /* [dcl.fct] forbids type definitions in return types.
     Unfortunately, it's not easy to know whether or not we are
     processing a return type until after the fact.  Strip any
     pointer, reference and pointer-to-member declarators to see what
     the decl-specifiers ultimately apply to.  */
  while (d != NULL
	 && (d->kind == cdk_pointer
	     || d->kind == cdk_reference
	     || d->kind == cdk_ptrmem))
    d = d->declarator;

  if (d == NULL || d->kind != cdk_function)
    return;

  error_at (type_location,
	    "new types may not be defined in a return type");
  inform (type_location,
	  "(perhaps a semicolon is missing after the definition of %qT)",
	  type);
}
/* A type-specifier (TYPE) has been parsed which cannot be followed by
   "<" in any valid C++ program.  If the next token is indeed "<",
   issue a message warning the user about what appears to be an
   invalid attempt to form a template-id.  LOCATION is the location
   of the type-specifier (TYPE).  */

static void
cp_parser_check_for_invalid_template_id (cp_parser* parser,
					 tree type, location_t location)
{
  /* Position of the bogus "<", or 0 if we will not purge tokens.  */
  cp_token_position start = 0;

  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    {
      /* Pick the most specific wording available for TYPE.  */
      if (TYPE_P (type))
	error_at (location, "%qT is not a template", type);
      else if (TREE_CODE (type) == IDENTIFIER_NODE)
	error_at (location, "%qE is not a template", type);
      else
	error_at (location, "invalid template-id");
      /* Remember the location of the invalid "<".  */
      if (cp_parser_uncommitted_to_tentative_parse_p (parser))
	start = cp_lexer_token_position (parser->lexer, true);
      /* Consume the "<".  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the template arguments.  */
      cp_parser_enclosed_template_argument_list (parser);
      /* Permanently remove the invalid template arguments so that
	 this error message is not issued again.  */
      if (start)
	cp_lexer_purge_tokens_after (parser->lexer, start);
    }
}
/* If parsing an integral constant-expression, issue an error message
   about the fact that THING appeared and return true.  Otherwise,
   return false.  In either case, set
   PARSER->NON_INTEGRAL_CONSTANT_EXPRESSION_P.  */

static bool
cp_parser_non_integral_constant_expression (cp_parser *parser,
					    non_integral_constant thing)
{
  parser->non_integral_constant_expression_p = true;
  if (parser->integral_constant_expression_p)
    {
      if (!parser->allow_non_integral_constant_expression_p)
	{
	  const char *msg = NULL;
	  switch (thing)
	    {
	    /* These THINGs each get a dedicated, complete sentence and
	       return immediately after diagnosing.  */
	    case NIC_FLOAT:
	      error ("floating-point literal "
		     "cannot appear in a constant-expression");
	      return true;
	    case NIC_CAST:
	      error ("a cast to a type other than an integral or "
		     "enumeration type cannot appear in a "
		     "constant-expression");
	      return true;
	    case NIC_TYPEID:
	      error ("%<typeid%> operator "
		     "cannot appear in a constant-expression");
	      return true;
	    case NIC_NCC:
	      error ("non-constant compound literals "
		     "cannot appear in a constant-expression");
	      return true;
	    case NIC_FUNC_CALL:
	      error ("a function call "
		     "cannot appear in a constant-expression");
	      return true;
	    case NIC_INC:
	      error ("an increment "
		     "cannot appear in a constant-expression");
	      return true;
	    case NIC_DEC:
	      /* Was "an decrement"; fixed to correct English.  */
	      error ("a decrement "
		     "cannot appear in a constant-expression");
	      return true;
	    case NIC_ARRAY_REF:
	      error ("an array reference "
		     "cannot appear in a constant-expression");
	      return true;
	    case NIC_ADDR_LABEL:
	      error ("the address of a label "
		     "cannot appear in a constant-expression");
	      return true;
	    case NIC_OVERLOADED:
	      error ("calls to overloaded operators "
		     "cannot appear in a constant-expression");
	      return true;
	    case NIC_ASSIGNMENT:
	      error ("an assignment cannot appear in a constant-expression");
	      return true;
	    case NIC_COMMA:
	      error ("a comma operator "
		     "cannot appear in a constant-expression");
	      return true;
	    case NIC_CONSTRUCTOR:
	      error ("a call to a constructor "
		     "cannot appear in a constant-expression");
	      return true;
	    case NIC_TRANSACTION:
	      error ("a transaction expression "
		     "cannot appear in a constant-expression");
	      return true;

	    /* These THINGs share the generic %qs message below.  */
	    case NIC_THIS:
	      msg = "this";
	      break;
	    case NIC_FUNC_NAME:
	      msg = "__FUNCTION__";
	      break;
	    case NIC_PRETTY_FUNC:
	      msg = "__PRETTY_FUNCTION__";
	      break;
	    case NIC_C99_FUNC:
	      msg = "__func__";
	      break;
	    case NIC_VA_ARG:
	      msg = "va_arg";
	      break;
	    case NIC_ARROW:
	      msg = "->";
	      break;
	    case NIC_POINT:
	      msg = ".";
	      break;
	    case NIC_STAR:
	      msg = "*";
	      break;
	    case NIC_ADDR:
	      msg = "&";
	      break;
	    case NIC_PREINCREMENT:
	      msg = "++";
	      break;
	    case NIC_PREDECREMENT:
	      msg = "--";
	      break;
	    case NIC_NEW:
	      msg = "new";
	      break;
	    case NIC_DEL:
	      msg = "delete";
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  if (msg)
	    error ("%qs cannot appear in a constant-expression", msg);
	  return true;
	}
    }
  return false;
}
/* Emit a diagnostic for an invalid type name.  SCOPE is the
   qualifying scope (or NULL, if none) for ID.  This function commits
   to the current active tentative parse, if any.  (Otherwise, the
   problematic construct might be encountered again later, resulting
   in duplicate error messages.)  LOCATION is the location of ID.  */

static void
cp_parser_diagnose_invalid_type_name (cp_parser *parser,
				      tree scope, tree id,
				      location_t location)
{
  tree decl, old_scope;
  cp_parser_commit_to_tentative_parse (parser);
  /* Try to lookup the identifier.  The lookup runs with SCOPE
     temporarily installed as the parser's scope.  */
  old_scope = parser->scope;
  parser->scope = scope;
  decl = cp_parser_lookup_name_simple (parser, id, location);
  parser->scope = old_scope;
  /* If the lookup found a template-name, it means that the user forgot
     to specify an argument list.  Emit a useful error message.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    error_at (location,
	      "invalid use of template-name %qE without an argument list",
	      decl);
  else if (TREE_CODE (id) == BIT_NOT_EXPR)
    error_at (location, "invalid use of destructor %qD as a type", id);
  else if (TREE_CODE (decl) == TYPE_DECL)
    /* Something like 'unsigned A a;'  */
    error_at (location, "invalid combination of multiple type-specifiers");
  else if (!parser->scope)
    {
      /* Unqualified case.  Issue an error message.  */
      error_at (location, "%qE does not name a type", id);
      /* If we're in a template class, it's possible that the user was
	 referring to a type from a base class.  For example:

	   template <typename T> struct A { typedef T X; };
	   template <typename T> struct B : public A<T> { X x; };

	 The user should have said "typename A<T>::X".  */
      if (cxx_dialect < cxx0x && id == ridpointers[(int)RID_CONSTEXPR])
	inform (location, "C++11 %<constexpr%> only available with "
		"-std=c++11 or -std=gnu++11");
      else if (processing_template_decl && current_class_type
	       && TYPE_BINFO (current_class_type))
	{
	  tree b;
	  /* Scan each direct base of the current class.  */
	  for (b = TREE_CHAIN (TYPE_BINFO (current_class_type));
	       b;
	       b = TREE_CHAIN (b))
	    {
	      tree base_type = BINFO_TYPE (b);
	      if (CLASS_TYPE_P (base_type)
		  && dependent_type_p (base_type))
		{
		  tree field;
		  /* Go from a particular instantiation of the
		     template (which will have an empty TYPE_FIELDs),
		     to the main version.  */
		  base_type = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (base_type);
		  for (field = TYPE_FIELDS (base_type);
		       field;
		       field = DECL_CHAIN (field))
		    if (TREE_CODE (field) == TYPE_DECL
			&& DECL_NAME (field) == id)
		      {
			/* Found a nested type with the sought name;
			   suggest the qualified form.  */
			inform (location,
				"(perhaps %<typename %T::%E%> was intended)",
				BINFO_TYPE (b), id);
			break;
		      }
		  /* Stop after the first base that provided a hint.  */
		  if (field)
		    break;
		}
	    }
	}
    }
  /* Here we diagnose qualified-ids where the scope is actually correct,
     but the identifier does not resolve to a valid type name.  */
  else if (parser->scope != error_mark_node)
    {
      if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
	error_at (location, "%qE in namespace %qE does not name a type",
		  id, parser->scope);
      else if (CLASS_TYPE_P (parser->scope)
	       && constructor_name_p (id, parser->scope))
	{
	  /* A<T>::A<T>() */
	  error_at (location, "%<%T::%E%> names the constructor, not"
		    " the type", parser->scope, id);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
	    error_at (location, "and %qT has no template constructors",
		      parser->scope);
	}
      else if (TYPE_P (parser->scope)
	       && dependent_scope_p (parser->scope))
	error_at (location, "need %<typename%> before %<%T::%E%> because "
		  "%qT is a dependent scope",
		  parser->scope, id, parser->scope);
      else if (TYPE_P (parser->scope))
	error_at (location, "%qE in %q#T does not name a type",
		  id, parser->scope);
      else
	gcc_unreachable ();
    }
}
/* Check for a common situation where a type-name should be present,
   but is not, and issue a sensible error message.  Returns true if an
   invalid type-name was detected.

   The situation handled by this function are variable declarations of the
   form `ID a', where `ID' is an id-expression and `a' is a plain identifier.
   Usually, `ID' should name a type, but if we got here it means that it
   does not.  We try to emit the best possible error message depending on
   how exactly the id-expression looks like.  */

static bool
cp_parser_parse_and_diagnose_invalid_type_name (cp_parser *parser)
{
  tree id;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Avoid duplicate error about ambiguous lookup.  */
  if (token->type == CPP_NESTED_NAME_SPECIFIER)
    {
      cp_token *next = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (next->type == CPP_NAME && next->ambiguous_p)
	/* Skip straight to error recovery; the ambiguity was already
	   diagnosed.  */
	goto out;
    }
  /* Re-parse the id-expression tentatively so we can back out if it
     turns out not to be the error case we handle.  */
  cp_parser_parse_tentatively (parser);
  id = cp_parser_id_expression (parser,
				/*template_keyword_p=*/false,
				/*check_dependency_p=*/true,
				/*template_p=*/NULL,
				/*declarator_p=*/true,
				/*optional_p=*/false);
  /* If the next token is a (, this is a function with no explicit return
     type, i.e. constructor, destructor or conversion op.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)
      || TREE_CODE (id) == TYPE_DECL)
    {
      cp_parser_abort_tentative_parse (parser);
      return false;
    }
  if (!cp_parser_parse_definitely (parser))
    return false;
  /* Emit a diagnostic for the invalid type.  */
  cp_parser_diagnose_invalid_type_name (parser, parser->scope,
					id, token->location);
 out:
  /* If we aren't in the middle of a declarator (i.e. in a
     parameter-declaration-clause), skip to the end of the declaration;
     there's no point in trying to process it.  */
  if (!parser->in_declarator_p)
    cp_parser_skip_to_end_of_block_or_statement (parser);
  return true;
}
/* Consume tokens up to, and including, the next non-nested closing `)'.
   Returns 1 iff we found a closing `)'.  RECOVERING is true, if we
   are doing error recovery.  Returns -1 if OR_COMMA is true and we
   found an unnested comma.  */

static int
cp_parser_skip_to_closing_parenthesis (cp_parser *parser,
				       bool recovering,
				       bool or_comma,
				       bool consume_paren)
{
  /* Nesting depths for each bracket kind seen while skipping.  */
  unsigned paren_depth = 0;
  unsigned brace_depth = 0;
  unsigned square_depth = 0;

  /* During a tentative parse we must not consume tokens destructively
     unless we are committed (or looking for a comma).  */
  if (recovering && !or_comma
      && cp_parser_uncommitted_to_tentative_parse_p (parser))
    return 0;

  while (true)
    {
      cp_token * token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_EOF:
	case CPP_PRAGMA_EOL:
	  /* If we've run out of tokens, then there is no closing `)'.  */
	  return 0;

	/* This is good for lambda expression capture-lists.  */
	case CPP_OPEN_SQUARE:
	  ++square_depth;
	  break;
	case CPP_CLOSE_SQUARE:
	  /* `!depth--' returns for an unmatched `]' (depth was zero),
	     and otherwise just decrements the depth.  */
	  if (!square_depth--)
	    return 0;
	  break;

	case CPP_SEMICOLON:
	  /* This matches the processing in skip_to_end_of_statement.  */
	  if (!brace_depth)
	    return 0;
	  break;

	case CPP_OPEN_BRACE:
	  ++brace_depth;
	  break;
	case CPP_CLOSE_BRACE:
	  /* An unmatched `}' ends the skip; see the `]' case above.  */
	  if (!brace_depth--)
	    return 0;
	  break;

	case CPP_COMMA:
	  /* A comma counts only when completely unnested.  */
	  if (recovering && or_comma && !brace_depth && !paren_depth
	      && !square_depth)
	    return -1;
	  break;

	case CPP_OPEN_PAREN:
	  /* Parentheses inside braces are tracked by the brace depth.  */
	  if (!brace_depth)
	    ++paren_depth;
	  break;

	case CPP_CLOSE_PAREN:
	  /* The sought `)' is one at paren_depth zero outside braces.  */
	  if (!brace_depth && !paren_depth--)
	    {
	      if (consume_paren)
		cp_lexer_consume_token (parser->lexer);
	      return 1;
	    }
	  break;

	default:
	  break;
	}

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* Consume tokens until we reach the end of the current statement.
   Normally, that will be just before consuming a `;'.  However, if a
   non-nested `}' comes first, then we stop before consuming that.  */

static void
cp_parser_skip_to_end_of_statement (cp_parser* parser)
{
  unsigned nesting_depth = 0;

  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_EOF:
	case CPP_PRAGMA_EOL:
	  /* If we've run out of tokens, stop.  */
	  return;

	case CPP_SEMICOLON:
	  /* If the next token is a `;', we have reached the end of the
	     statement.  */
	  if (!nesting_depth)
	    return;
	  break;

	case CPP_CLOSE_BRACE:
	  /* If this is a non-nested '}', stop before consuming it.
	     That way, when confronted with something like:

	       { 3 + }

	     we stop before consuming the closing '}', even though we
	     have not yet reached a `;'.  */
	  if (nesting_depth == 0)
	    return;

	  /* If it is the closing '}' for a block that we have
	     scanned, stop -- but only after consuming the token.
	     That way given:

	       void f g () { ... }
	       typedef int I;

	     we will stop after the body of the erroneously declared
	     function, but before consuming the following `typedef'
	     declaration.  */
	  if (--nesting_depth == 0)
	    {
	      cp_lexer_consume_token (parser->lexer);
	      return;
	    }
	  /* Bug fix: previously this case fell through into
	     CPP_OPEN_BRACE, re-incrementing NESTING_DEPTH and undoing
	     the decrement above, so nested close braces never reduced
	     the depth and the skip ran on to EOF.  */
	  break;

	case CPP_OPEN_BRACE:
	  ++nesting_depth;
	  break;

	default:
	  break;
	}

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* This function is called at the end of a statement or declaration.
   If the next token is a semicolon, it is consumed; otherwise, error
   recovery is attempted.  */

static void
cp_parser_consume_semicolon_at_end_of_statement (cp_parser *parser)
{
  /* The common case: the `;' is right there.  */
  if (cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON))
    return;

  /* The `;' was missing: discard the (erroneous) trailing input up to
     the end of the statement, then eat the terminating `;' if one is
     present after all.  */
  cp_parser_skip_to_end_of_statement (parser);
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    cp_lexer_consume_token (parser->lexer);
}
/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested `;'.  */

static void
cp_parser_skip_to_end_of_block_or_statement (cp_parser* parser)
{
  /* Brace nesting depth; set to -1 to request one final consume
     before the loop exits.  */
  int nesting_depth = 0;

  while (nesting_depth >= 0)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_EOF:
	case CPP_PRAGMA_EOL:
	  /* If we've run out of tokens, stop.  */
	  return;

	case CPP_SEMICOLON:
	  /* Stop if this is an unnested ';'.  (The `;' itself is still
	     consumed below before the loop terminates.)  */
	  if (!nesting_depth)
	    nesting_depth = -1;
	  break;

	case CPP_CLOSE_BRACE:
	  /* Stop if this is an unnested '}', or closes the outermost
	     nesting level.  */
	  nesting_depth--;
	  if (nesting_depth < 0)
	    /* Unnested '}': leave it unconsumed.  */
	    return;
	  if (!nesting_depth)
	    /* Outermost closing brace: consume it, then stop.  */
	    nesting_depth = -1;
	  break;

	case CPP_OPEN_BRACE:
	  /* Nest. */
	  nesting_depth++;
	  break;

	default:
	  break;
	}

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* Skip tokens until a non-nested closing curly brace is the next
   token, or there are no more tokens.  Return true in the first case,
   false otherwise.  */

static bool
cp_parser_skip_to_closing_brace (cp_parser *parser)
{
  unsigned depth = 0;

  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);

      if (tok->type == CPP_EOF || tok->type == CPP_PRAGMA_EOL)
	/* Out of tokens; the brace was never found.  */
	return false;

      if (tok->type == CPP_CLOSE_BRACE)
	{
	  /* A non-nested `}' ends the current block; leave it for the
	     caller to consume.  */
	  if (depth == 0)
	    return true;
	  --depth;
	}
      else if (tok->type == CPP_OPEN_BRACE)
	/* Entering a nested block; scan past all of it.  */
	++depth;

      cp_lexer_consume_token (parser->lexer);
    }
}
/* Consume tokens until we reach the end of the pragma.  The PRAGMA_TOK
   parameter is the PRAGMA token, allowing us to purge the entire pragma
   sequence.  */

static void
cp_parser_skip_to_pragma_eol (cp_parser* parser, cp_token *pragma_tok)
{
  parser->lexer->in_pragma = false;

  /* Eat tokens until the end-of-pragma marker (or EOF) goes by.  */
  for (;;)
    {
      cp_token *token = cp_lexer_consume_token (parser->lexer);
      if (token->type == CPP_PRAGMA_EOL || token->type == CPP_EOF)
	break;
    }

  /* Ensure that the pragma is not parsed again.  */
  cp_lexer_purge_tokens_after (parser->lexer, pragma_tok);
}
/* Require pragma end of line, resyncing with it as necessary.  The
   arguments are as for cp_parser_skip_to_pragma_eol.  */

static void
cp_parser_require_pragma_eol (cp_parser *parser, cp_token *pragma_tok)
{
  parser->lexer->in_pragma = false;

  /* If the end-of-pragma token is not next, resynchronize by throwing
     away the rest of the pragma.  */
  if (cp_parser_require (parser, CPP_PRAGMA_EOL, RT_PRAGMA_EOL) == NULL)
    cp_parser_skip_to_pragma_eol (parser, pragma_tok);
}
/* This is a simple wrapper around make_typename_type.  When the id is
   an unresolved identifier node, we can provide a superior diagnostic
   using cp_parser_diagnose_invalid_type_name.  */

static tree
cp_parser_make_typename_type (cp_parser *parser, tree scope,
			      tree id, location_t id_location)
{
  tree result;

  /* Non-identifiers go straight through; make_typename_type will
     issue its own diagnostics.  */
  if (TREE_CODE (id) != IDENTIFIER_NODE)
    return make_typename_type (scope, id, typename_type, tf_error);

  /* For a plain identifier, first try quietly; on failure we can emit
     a much better message ourselves.  */
  result = make_typename_type (scope, id, typename_type,
			       /*complain=*/tf_none);
  if (result == error_mark_node)
    cp_parser_diagnose_invalid_type_name (parser, scope, id, id_location);
  return result;
}
/* This is a wrapper around the
   make_{pointer,ptrmem,reference}_declarator functions that decides
   which one to call based on the CODE and CLASS_TYPE arguments.  The
   CODE argument should be one of the values returned by
   cp_parser_ptr_operator.  */

static cp_declarator *
cp_parser_make_indirect_declarator (enum tree_code code, tree class_type,
				    cp_cv_quals cv_qualifiers,
				    cp_declarator *target)
{
  switch (code)
    {
    case ERROR_MARK:
      return cp_error_declarator;

    case INDIRECT_REF:
      if (class_type == NULL_TREE)
	/* A plain `*' introduces a pointer.  */
	return make_pointer_declarator (cv_qualifiers, target);
      /* `X::*' introduces a pointer-to-member.  */
      return make_ptrmem_declarator (cv_qualifiers, class_type, target);

    case ADDR_EXPR:
      /* `&' introduces an lvalue reference.  */
      if (class_type == NULL_TREE)
	return make_reference_declarator (cv_qualifiers, target, false);
      break;

    case NON_LVALUE_EXPR:
      /* `&&' introduces an rvalue reference.  */
      if (class_type == NULL_TREE)
	return make_reference_declarator (cv_qualifiers, target, true);
      break;

    default:
      break;
    }
  gcc_unreachable ();
}
/* Create a new C++ parser.  Builds the main lexer, allocates the
   parser object (GC-allocated, zero-initialized), and sets every
   parser flag to its default state.  Returns the new parser.  */

static cp_parser *
cp_parser_new (void)
{
  cp_parser *parser;
  cp_lexer *lexer;
  unsigned i;

  /* cp_lexer_new_main is called before doing GC allocation because
     cp_lexer_new_main might load a PCH file.  */
  lexer = cp_lexer_new_main ();

  /* Initialize the binops_by_token so that we can get the tree
     directly from the token.  */
  for (i = 0; i < sizeof (binops) / sizeof (binops[0]); i++)
    binops_by_token[binops[i].token_type] = binops[i];

  parser = ggc_alloc_cleared_cp_parser ();
  parser->lexer = lexer;
  parser->context = cp_parser_context_new (NULL);

  /* For now, we always accept GNU extensions.  */
  parser->allow_gnu_extensions_p = 1;

  /* The `>' token is a greater-than operator, not the end of a
     template-id.  */
  parser->greater_than_is_operator_p = true;

  parser->default_arg_ok_p = true;

  /* We are not parsing a constant-expression.  */
  parser->integral_constant_expression_p = false;
  parser->allow_non_integral_constant_expression_p = false;
  parser->non_integral_constant_expression_p = false;

  /* Local variable names are not forbidden.  */
  parser->local_variables_forbidden_p = false;

  /* We are not processing an `extern "C"' declaration.  */
  parser->in_unbraced_linkage_specification_p = false;

  /* We are not processing a declarator.  */
  parser->in_declarator_p = false;

  /* We are not processing a template-argument-list.  */
  parser->in_template_argument_list_p = false;

  /* We are not in an iteration statement.  */
  parser->in_statement = 0;

  /* We are not in a switch statement.  */
  parser->in_switch_statement_p = false;

  /* We are not parsing a type-id inside an expression.  */
  parser->in_type_id_in_expr_p = false;

  /* Declarations aren't implicitly extern "C".  */
  parser->implicit_extern_c = false;

  /* String literals should be translated to the execution character set.  */
  parser->translate_strings_p = true;

  /* We are not parsing a function body.  */
  parser->in_function_body = false;

  /* We can correct until told otherwise.  */
  parser->colon_corrects_to_scope_p = true;

  /* The unparsed function queue is empty.  */
  push_unparsed_function_queues (parser);

  /* There are no classes being defined.  */
  parser->num_classes_being_defined = 0;

  /* No template parameters apply.  */
  parser->num_template_parameter_lists = 0;

  return parser;
}
/* Create a cp_lexer structure which will emit the tokens in CACHE
   and push it onto the parser's lexer stack.  This is used for delayed
   parsing of in-class method bodies and default arguments, and should
   not be confused with tentative parsing.  */

static void
cp_parser_push_lexer_for_tokens (cp_parser *parser, cp_token_cache *cache)
{
  cp_lexer *saved_lexer;

  /* Link the previous lexer below the new one.  */
  saved_lexer = parser->lexer;
  parser->lexer = cp_lexer_new_from_tokens (cache);
  parser->lexer->next = saved_lexer;
  /* Move the current source position to that of the first token in the
     new lexer.  */
  cp_lexer_set_source_position_from_token (parser->lexer->next_token);
}
/* Pop the top lexer off the parser stack.  This is never used for the
   "main" lexer, only for those pushed by cp_parser_push_lexer_for_tokens.  */

static void
cp_parser_pop_lexer (cp_parser *parser)
{
  cp_lexer *finished = parser->lexer;

  /* Unlink and destroy the finished lexer.  */
  parser->lexer = finished->next;
  cp_lexer_destroy (finished);
  /* Put the current source position back where it was before this
     lexer was pushed.  */
  cp_lexer_set_source_position_from_token (parser->lexer->next_token);
}
/* Lexical conventions [gram.lex] */
/* Parse an identifier.  Returns an IDENTIFIER_NODE representing the
   identifier, or error_mark_node if no identifier was present.  */

static tree
cp_parser_identifier (cp_parser* parser)
{
  /* Look for the identifier.  */
  cp_token *token = cp_parser_require (parser, CPP_NAME, RT_NAME);

  if (token == NULL)
    /* cp_parser_require has already issued the diagnostic.  */
    return error_mark_node;
  return token->u.value;
}
/* Parse a sequence of adjacent string constants.  Returns a
   TREE_STRING representing the combined, nul-terminated string
   constant.  If TRANSLATE is true, translate the string to the
   execution character set.  If WIDE_OK is true, a wide string is
   invalid here.

   C++98 [lex.string] says that if a narrow string literal token is
   adjacent to a wide string literal token, the behavior is undefined.
   However, C99 6.4.5p4 says that this results in a wide string literal.
   We follow C99 here, for consistency with the C front end.

   This code is largely lifted from lex_string() in c-lex.c.

   FUTURE: ObjC++ will need to handle @-strings here.  */

static tree
cp_parser_string_literal (cp_parser *parser, bool translate, bool wide_ok)
{
  tree value;
  size_t count;
  struct obstack str_ob;
  cpp_string str, istr, *strs;
  cp_token *tok;
  enum cpp_ttype type, curr_type;
  /* User-defined-suffix state: 0 = none seen, 1 = suffix recorded in
     SUFFIX_ID, -1 = conflicting suffixes already diagnosed.  */
  int have_suffix_p = 0;
  tree string_tree;
  tree suffix_id = NULL_TREE;
  bool curr_tok_is_userdef_p = false;

  tok = cp_lexer_peek_token (parser->lexer);
  if (!cp_parser_is_string_literal (tok))
    {
      cp_parser_error (parser, "expected string-literal");
      return error_mark_node;
    }

  /* Unwrap a user-defined literal token to its underlying string and
     plain token type.  */
  if (cpp_userdef_string_p (tok->type))
    {
      string_tree = USERDEF_LITERAL_VALUE (tok->u.value);
      curr_type = cpp_userdef_string_remove_type (tok->type);
      curr_tok_is_userdef_p = true;
    }
  else
    {
      string_tree = tok->u.value;
      curr_type = tok->type;
    }
  /* TYPE tracks the widest string kind seen so far.  */
  type = curr_type;

  /* Try to avoid the overhead of creating and destroying an obstack
     for the common case of just one string.  */
  if (!cp_parser_is_string_literal
      (cp_lexer_peek_nth_token (parser->lexer, 2)))
    {
      cp_lexer_consume_token (parser->lexer);
      str.text = (const unsigned char *)TREE_STRING_POINTER (string_tree);
      str.len = TREE_STRING_LENGTH (string_tree);
      count = 1;
      if (curr_tok_is_userdef_p)
	{
	  suffix_id = USERDEF_LITERAL_SUFFIX_ID (tok->u.value);
	  have_suffix_p = 1;
	  curr_type = cpp_userdef_string_remove_type (tok->type);
	}
      else
	curr_type = tok->type;
      strs = &str;
    }
  else
    {
      /* Multiple adjacent literals: accumulate cpp_strings on an
	 obstack, then hand them to libcpp to concatenate.  */
      gcc_obstack_init (&str_ob);
      count = 0;

      do
	{
	  cp_lexer_consume_token (parser->lexer);
	  count++;
	  str.text = (const unsigned char *)TREE_STRING_POINTER (string_tree);
	  str.len = TREE_STRING_LENGTH (string_tree);

	  if (curr_tok_is_userdef_p)
	    {
	      tree curr_suffix_id = USERDEF_LITERAL_SUFFIX_ID (tok->u.value);
	      if (have_suffix_p == 0)
		{
		  suffix_id = curr_suffix_id;
		  have_suffix_p = 1;
		}
	      else if (have_suffix_p == 1
		       && curr_suffix_id != suffix_id)
		{
		  /* Two different suffixes in one concatenation; say
		     so once and suppress further suffix checks.  */
		  error ("inconsistent user-defined literal suffixes"
			 " %qD and %qD in string literal",
			 suffix_id, curr_suffix_id);
		  have_suffix_p = -1;
		}
	      curr_type = cpp_userdef_string_remove_type (tok->type);
	    }
	  else
	    curr_type = tok->type;

	  if (type != curr_type)
	    {
	      /* A narrow (CPP_STRING) literal may be widened by any
	         other kind; mixing two different non-narrow kinds is
	         not supported.  */
	      if (type == CPP_STRING)
		type = curr_type;
	      else if (curr_type != CPP_STRING)
		error_at (tok->location,
			  "unsupported non-standard concatenation "
			  "of string literals");
	    }

	  obstack_grow (&str_ob, &str, sizeof (cpp_string));

	  /* Peek at the next token to decide whether to continue.  */
	  tok = cp_lexer_peek_token (parser->lexer);
	  if (cpp_userdef_string_p (tok->type))
	    {
	      string_tree = USERDEF_LITERAL_VALUE (tok->u.value);
	      curr_type = cpp_userdef_string_remove_type (tok->type);
	      curr_tok_is_userdef_p = true;
	    }
	  else
	    {
	      string_tree = tok->u.value;
	      curr_type = tok->type;
	      curr_tok_is_userdef_p = false;
	    }
	}
      while (cp_parser_is_string_literal (tok));

      strs = (cpp_string *) obstack_finish (&str_ob);
    }

  if (type != CPP_STRING && !wide_ok)
    {
      cp_parser_error (parser, "a wide string is invalid in this context");
      /* Degrade to a narrow string so parsing can continue.  */
      type = CPP_STRING;
    }

  if ((translate ? cpp_interpret_string : cpp_interpret_string_notranslate)
      (parse_in, strs, count, &istr, type))
    {
      value = build_string (istr.len, (const char *)istr.text);
      free (CONST_CAST (unsigned char *, istr.text));

      /* Give the string the array type matching its literal kind.  */
      switch (type)
	{
	default:
	case CPP_STRING:
	case CPP_UTF8STRING:
	  TREE_TYPE (value) = char_array_type_node;
	  break;
	case CPP_STRING16:
	  TREE_TYPE (value) = char16_array_type_node;
	  break;
	case CPP_STRING32:
	  TREE_TYPE (value) = char32_array_type_node;
	  break;
	case CPP_WSTRING:
	  TREE_TYPE (value) = wchar_array_type_node;
	  break;
	}

      value = fix_string_type (value);

      /* Non-zero (including -1 after an inconsistency diagnostic):
	 dispatch to the user-defined string literal operator.  */
      if (have_suffix_p)
	{
	  tree literal = build_userdef_literal (suffix_id, value, NULL_TREE);
	  tok->u.value = literal;
	  return cp_parser_userdef_string_literal (tok);
	}
    }
  else
    /* cpp_interpret_string has issued an error.  */
    value = error_mark_node;

  if (count > 1)
    obstack_free (&str_ob, 0);

  return value;
}
/* Look up a literal operator with the name and the exact arguments.
   NAME is the mangled operator identifier; ARGS holds the argument
   expressions whose types the candidate's parameters must match.
   Returns the matching function, or error_mark_node if none.  */

static tree
lookup_literal_operator (tree name, VEC(tree,gc) *args)
{
  tree decl, fns;
  decl = lookup_name (name);
  if (!decl || !is_overloaded_fn (decl))
    return error_mark_node;

  /* Examine each overload in turn.  */
  for (fns = decl; fns; fns = OVL_NEXT (fns))
    {
      unsigned int ix;
      bool found = true;
      tree fn = OVL_CURRENT (fns);
      tree argtypes = NULL_TREE;
      argtypes = TYPE_ARG_TYPES (TREE_TYPE (fn));
      if (argtypes != NULL_TREE)
	{
	  /* Compare each parameter type against the corresponding
	     argument's type.  */
	  for (ix = 0; ix < VEC_length (tree, args) && argtypes != NULL_TREE;
	       ++ix, argtypes = TREE_CHAIN (argtypes))
	    {
	      tree targ = TREE_VALUE (argtypes);
	      tree tparm = TREE_TYPE (VEC_index (tree, args, ix));
	      bool ptr = TREE_CODE (targ) == POINTER_TYPE;
	      bool arr = TREE_CODE (tparm) == ARRAY_TYPE;
	      /* Accept either an exact type match, or a pointer
		 parameter matched by an array argument whose element
		 types agree (array-to-pointer decay).  */
	      if ((ptr || arr || !same_type_p (targ, tparm))
		  && (!ptr || !arr
		      || !same_type_p (TREE_TYPE (targ),
				       TREE_TYPE (tparm))))
		found = false;
	    }
	  /* Require that all arguments matched and that the parameter
	     list is exhausted at exactly the same point.  */
	  if (found
	      && ix == VEC_length (tree, args)
	      /* May be this should be sufficient_parms_p instead,
		 depending on how exactly should user-defined literals
		 work in presence of default arguments on the literal
		 operator parameters.  */
	      && argtypes == void_list_node)
	    return fn;
	}
    }

  return error_mark_node;
}
/* Parse a user-defined char constant.  Returns a call to a user-defined
   literal operator taking the character as an argument.  */

static tree
cp_parser_userdef_char_literal (cp_parser *parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);
  tree literal = token->u.value;
  tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal);
  tree value = USERDEF_LITERAL_VALUE (literal);
  tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
  tree decl;
  tree result = error_mark_node;
  VEC(tree,gc) *args;

  /* Build up a call to the user-defined operator */
  /* Lookup the name we got back from the id-expression.  */
  args = make_tree_vector ();
  VEC_safe_push (tree, gc, args, value);
  decl = lookup_literal_operator (name, args);

  /* Only attempt the call if the lookup produced a usable operator.  */
  if (decl && decl != error_mark_node)
    result = finish_call_expr (decl, &args, false, true, tf_warning_or_error);
  release_tree_vector (args);

  if (result == error_mark_node)
    error ("unable to find character literal operator %qD with %qT argument",
	   name, TREE_TYPE (value));
  return result;
}
/* A subroutine of cp_parser_userdef_numeric_literal to
   create a char... template parameter pack from a string node.  */

static tree
make_char_string_pack (tree value)
{
  const char *str = TREE_STRING_POINTER (value);
  /* Exclude the trailing NUL from the pack.  */
  int len = TREE_STRING_LENGTH (value) - 1;
  tree charvec = make_tree_vec (len);
  tree argpack = make_node (NONTYPE_ARGUMENT_PACK);
  tree argvec = make_tree_vec (1);
  int i;

  /* One char_type_node constant per character of the string.  */
  for (i = 0; i < len; ++i)
    TREE_VEC_ELT (charvec, i) = build_int_cst (char_type_node, str[i]);

  /* Wrap the characters into an argument pack, then into a one-slot
     template argument vector.  */
  SET_ARGUMENT_PACK_ARGS (argpack, charvec);
  TREE_TYPE (argpack) = char_type_node;
  TREE_VEC_ELT (argvec, 0) = argpack;
  return argvec;
}
/* Parse a user-defined numeric constant.  returns a call to a user-defined
   literal operator.  Tries, in order: an operator taking the cooked
   numeric value, a raw operator taking the spelling as a const char*,
   and a char... template operator instantiated with the spelling.  */

static tree
cp_parser_userdef_numeric_literal (cp_parser *parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);
  tree literal = token->u.value;
  tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal);
  tree value = USERDEF_LITERAL_VALUE (literal);
  tree num_string = USERDEF_LITERAL_NUM_STRING (literal);
  tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
  tree decl, result;
  VEC(tree,gc) *args;

  /* Look for a literal operator taking the exact type of numeric argument
     as the literal value.  */
  args = make_tree_vector ();
  VEC_safe_push (tree, gc, args, value);
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      /* tf_none: failures here fall through to the next strategy.  */
      result = finish_call_expr (decl, &args, false, true, tf_none);
      if (result != error_mark_node)
	{
	  release_tree_vector (args);
	  return result;
	}
    }
  release_tree_vector (args);

  /* If the numeric argument didn't work, look for a raw literal
     operator taking a const char* argument consisting of the number
     in string format.  */
  args = make_tree_vector ();
  VEC_safe_push (tree, gc, args, num_string);
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      result = finish_call_expr (decl, &args, false, true, tf_none);
      if (result != error_mark_node)
	{
	  release_tree_vector (args);
	  return result;
	}
    }
  release_tree_vector (args);

  /* If the raw literal didn't work, look for a non-type template
     function with parameter pack char....  Call the function with
     template parameter characters representing the number.  */
  args = make_tree_vector ();
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      tree tmpl_args = make_char_string_pack (num_string);
      decl = lookup_template_function (decl, tmpl_args);
      result = finish_call_expr (decl, &args, false, true, tf_none);
      if (result != error_mark_node)
	{
	  release_tree_vector (args);
	  return result;
	}
    }
  release_tree_vector (args);

  /* All three strategies failed.  */
  error ("unable to find numeric literal operator %qD", name);
  return error_mark_node;
}
/* Parse a user-defined string constant.  Returns a call to a user-defined
   literal operator taking a character pointer and the length of the string
   as arguments.  */

static tree
cp_parser_userdef_string_literal (cp_token *token)
{
  tree literal = token->u.value;
  tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal);
  tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
  tree value = USERDEF_LITERAL_VALUE (literal);
  /* Character count: byte length divided by the element size, minus
     the terminating NUL.  */
  int len = TREE_STRING_LENGTH (value)
	/ TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (value)))) - 1;
  tree decl, result;

  /* Build up a call to the user-defined operator */
  /* Lookup the name we got back from the id-expression.  */
  VEC(tree,gc) *args = make_tree_vector ();
  VEC_safe_push (tree, gc, args, value);
  VEC_safe_push (tree, gc, args, build_int_cst (size_type_node, len));
  /* NOTE(review): this uses plain lookup_name, unlike the char and
     numeric literal parsers above which use lookup_literal_operator
     to pre-filter overloads by argument type -- verify whether the
     difference is intentional.  */
  decl = lookup_name (name);
  if (!decl || decl == error_mark_node)
    {
      error ("unable to find string literal operator %qD", name);
      release_tree_vector (args);
      return error_mark_node;
    }
  result = finish_call_expr (decl, &args, false, true, tf_none);
  release_tree_vector (args);
  if (result != error_mark_node)
    return result;

  error ("unable to find string literal operator %qD with %qT, %qT arguments",
	 name, TREE_TYPE (value), size_type_node);
  return error_mark_node;
}
/* Basic concepts [gram.basic] */
/* Parse a translation-unit.
translation-unit:
declaration-seq [opt]
Returns TRUE if all went well. */
static bool
cp_parser_translation_unit (cp_parser* parser)
{
  /* Address of the first non-permanent object on the declarator
     obstack; remembered across calls so we can verify cleanup.  */
  static void *declarator_obstack_base;

  bool ok;

  /* One-time initialization of the declarator obstack and of the
     shared error declarator and empty parameter list.  */
  if (!cp_error_declarator)
    {
      gcc_obstack_init (&declarator_obstack);
      cp_error_declarator = make_declarator (cdk_error);
      no_parameters = make_parameter_declarator (NULL, NULL, NULL_TREE);
      declarator_obstack_base = obstack_next_free (&declarator_obstack);
    }

  cp_parser_declaration_seq_opt (parser);

  if (!cp_lexer_next_token_is (parser->lexer, CPP_EOF))
    {
      /* Tokens remain: something did not parse as a declaration.  */
      cp_parser_error (parser, "expected declaration");
      ok = false;
    }
  else
    {
      /* The token array is no longer needed; free it.  */
      cp_lexer_destroy (parser->lexer);
      parser->lexer = NULL;

      /* Pop an implicitly-entered extern "C" language context.
	 (Only relevant for PCH.)  */
      if (parser->implicit_extern_c)
	{
	  pop_lang_context ();
	  parser->implicit_extern_c = false;
	}

      finish_translation_unit ();
      ok = true;
    }

  /* Everything pushed on the declarator obstack must have been
     popped by now.  */
  gcc_assert (obstack_next_free (&declarator_obstack)
	      == declarator_obstack_base);
  return ok;
}
/* Expressions [gram.expr] */
/* Parse a primary-expression.
primary-expression:
literal
this
( expression )
id-expression
GNU Extensions:
primary-expression:
( compound-statement )
__builtin_va_arg ( assignment-expression , type-id )
__builtin_offsetof ( type-id , offsetof-expression )
C++ Extensions:
__has_nothrow_assign ( type-id )
__has_nothrow_constructor ( type-id )
__has_nothrow_copy ( type-id )
__has_trivial_assign ( type-id )
__has_trivial_constructor ( type-id )
__has_trivial_copy ( type-id )
__has_trivial_destructor ( type-id )
__has_virtual_destructor ( type-id )
__is_abstract ( type-id )
__is_base_of ( type-id , type-id )
__is_class ( type-id )
__is_convertible_to ( type-id , type-id )
__is_empty ( type-id )
__is_enum ( type-id )
__is_final ( type-id )
__is_literal_type ( type-id )
__is_pod ( type-id )
__is_polymorphic ( type-id )
__is_std_layout ( type-id )
__is_trivial ( type-id )
__is_union ( type-id )
Objective-C++ Extension:
primary-expression:
objc-expression
literal:
__null
ADDRESS_P is true iff this expression was immediately preceded by
"&" and therefore might denote a pointer-to-member. CAST_P is true
iff this expression is the target of a cast. TEMPLATE_ARG_P is
true iff this expression is a template argument.
Returns a representation of the expression. Upon return, *IDK
indicates what kind of id-expression (if any) was present. */
static tree
cp_parser_primary_expression (cp_parser *parser,
bool address_p,
bool cast_p,
bool template_arg_p,
cp_id_kind *idk)
{
cp_token *token = NULL;
/* Assume the primary expression is not an id-expression. */
*idk = CP_ID_KIND_NONE;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
switch (token->type)
{
/* literal:
integer-literal
character-literal
floating-literal
string-literal
boolean-literal
pointer-literal
user-defined-literal */
case CPP_CHAR:
case CPP_CHAR16:
case CPP_CHAR32:
case CPP_WCHAR:
case CPP_NUMBER:
/* A user-defined numeric literal is handled by its own routine;
note that the literal token has not been consumed at this point. */
if (TREE_CODE (token->u.value) == USERDEF_LITERAL)
return cp_parser_userdef_numeric_literal (parser);
token = cp_lexer_consume_token (parser->lexer);
if (TREE_CODE (token->u.value) == FIXED_CST)
{
error_at (token->location,
"fixed-point types not supported in C++");
return error_mark_node;
}
/* Floating-point literals are only allowed in an integral
constant expression if they are cast to an integral or
enumeration type. */
if (TREE_CODE (token->u.value) == REAL_CST
&& parser->integral_constant_expression_p
&& pedantic)
{
/* CAST_P will be set even in invalid code like "int(2.7 +
...)". Therefore, we have to check that the next token
is sure to end the cast. */
if (cast_p)
{
cp_token *next_token;
next_token = cp_lexer_peek_token (parser->lexer);
if (/* The comma at the end of an
enumerator-definition. */
next_token->type != CPP_COMMA
/* The curly brace at the end of an enum-specifier. */
&& next_token->type != CPP_CLOSE_BRACE
/* The end of a statement. */
&& next_token->type != CPP_SEMICOLON
/* The end of the cast-expression. */
&& next_token->type != CPP_CLOSE_PAREN
/* The end of an array bound. */
&& next_token->type != CPP_CLOSE_SQUARE
/* The closing ">" in a template-argument-list. */
&& (next_token->type != CPP_GREATER
|| parser->greater_than_is_operator_p)
/* C++0x only: A ">>" treated like two ">" tokens,
in a template-argument-list. */
&& (next_token->type != CPP_RSHIFT
|| (cxx_dialect == cxx98)
|| parser->greater_than_is_operator_p))
cast_p = false;
}
/* If we are within a cast, then the constraint that the
cast is to an integral or enumeration type will be
checked at that point. If we are not within a cast, then
this code is invalid. */
if (!cast_p)
cp_parser_non_integral_constant_expression (parser, NIC_FLOAT);
}
return token->u.value;
case CPP_CHAR_USERDEF:
case CPP_CHAR16_USERDEF:
case CPP_CHAR32_USERDEF:
case CPP_WCHAR_USERDEF:
return cp_parser_userdef_char_literal (parser);
case CPP_STRING:
case CPP_STRING16:
case CPP_STRING32:
case CPP_WSTRING:
case CPP_UTF8STRING:
case CPP_STRING_USERDEF:
case CPP_STRING16_USERDEF:
case CPP_STRING32_USERDEF:
case CPP_WSTRING_USERDEF:
case CPP_UTF8STRING_USERDEF:
/* ??? Should wide strings be allowed when parser->translate_strings_p
is false (i.e. in attributes)? If not, we can kill the third
argument to cp_parser_string_literal. */
return cp_parser_string_literal (parser,
parser->translate_strings_p,
true);
case CPP_OPEN_PAREN:
{
tree expr;
bool saved_greater_than_is_operator_p;
/* Consume the `('. */
cp_lexer_consume_token (parser->lexer);
/* Within a parenthesized expression, a `>' token is always
the greater-than operator. */
saved_greater_than_is_operator_p
= parser->greater_than_is_operator_p;
parser->greater_than_is_operator_p = true;
/* If we see `( { ' then we are looking at the beginning of
a GNU statement-expression. */
if (cp_parser_allow_gnu_extensions_p (parser)
&& cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
{
/* Statement-expressions are not allowed by the standard. */
pedwarn (token->location, OPT_pedantic,
"ISO C++ forbids braced-groups within expressions");
/* And they're not allowed outside of a function-body; you
cannot, for example, write:
int i = ({ int j = 3; j + 1; });
at class or namespace scope. */
if (!parser->in_function_body
|| parser->in_template_argument_list_p)
{
error_at (token->location,
"statement-expressions are not allowed outside "
"functions nor in template-argument lists");
cp_parser_skip_to_end_of_block_or_statement (parser);
expr = error_mark_node;
}
else
{
/* Start the statement-expression. */
expr = begin_stmt_expr ();
/* Parse the compound-statement. */
cp_parser_compound_statement (parser, expr, false, false);
/* Finish up. */
expr = finish_stmt_expr (expr, false);
}
}
else
{
/* Parse the parenthesized expression. */
expr = cp_parser_expression (parser, cast_p, idk);
/* Let the front end know that this expression was
enclosed in parentheses. This matters in case, for
example, the expression is of the form `A::B', since
`&A::B' might be a pointer-to-member, but `&(A::B)' is
not. */
finish_parenthesized_expr (expr);
/* DR 705: Wrapping an unqualified name in parentheses
suppresses arg-dependent lookup. We want to pass back
CP_ID_KIND_QUALIFIED for suppressing vtable lookup
(c++/37862), but none of the others. */
if (*idk != CP_ID_KIND_QUALIFIED)
*idk = CP_ID_KIND_NONE;
}
/* The `>' token might be the end of a template-id or
template-parameter-list now. */
parser->greater_than_is_operator_p
= saved_greater_than_is_operator_p;
/* Consume the `)'. */
if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
cp_parser_skip_to_end_of_statement (parser);
return expr;
}
case CPP_OPEN_SQUARE:
if (c_dialect_objc ())
/* We have an Objective-C++ message. */
return cp_parser_objc_expression (parser);
/* Otherwise, a `[' here begins a lambda-expression. */
{
tree lam = cp_parser_lambda_expression (parser);
/* Don't warn about a failed tentative parse. */
if (cp_parser_error_occurred (parser))
return error_mark_node;
maybe_warn_cpp0x (CPP0X_LAMBDA_EXPR);
return lam;
}
case CPP_OBJC_STRING:
if (c_dialect_objc ())
/* We have an Objective-C++ string literal. */
return cp_parser_objc_expression (parser);
cp_parser_error (parser, "expected primary-expression");
return error_mark_node;
case CPP_KEYWORD:
switch (token->keyword)
{
/* These two are the boolean literals. */
case RID_TRUE:
cp_lexer_consume_token (parser->lexer);
return boolean_true_node;
case RID_FALSE:
cp_lexer_consume_token (parser->lexer);
return boolean_false_node;
/* The `__null' literal. */
case RID_NULL:
cp_lexer_consume_token (parser->lexer);
return null_node;
/* The `nullptr' literal. */
case RID_NULLPTR:
cp_lexer_consume_token (parser->lexer);
return nullptr_node;
/* Recognize the `this' keyword. */
case RID_THIS:
cp_lexer_consume_token (parser->lexer);
if (parser->local_variables_forbidden_p)
{
error_at (token->location,
"%<this%> may not be used in this context");
return error_mark_node;
}
/* Pointers cannot appear in constant-expressions. */
if (cp_parser_non_integral_constant_expression (parser, NIC_THIS))
return error_mark_node;
return finish_this_expr ();
/* The `operator' keyword can be the beginning of an
id-expression. */
case RID_OPERATOR:
goto id_expression;
case RID_FUNCTION_NAME:
case RID_PRETTY_FUNCTION_NAME:
case RID_C99_FUNCTION_NAME:
{
non_integral_constant name;
/* The symbols __FUNCTION__, __PRETTY_FUNCTION__, and
__func__ are the names of variables -- but they are
treated specially. Therefore, they are handled here,
rather than relying on the generic id-expression logic
below. Grammatically, these names are id-expressions.
Consume the token. */
token = cp_lexer_consume_token (parser->lexer);
switch (token->keyword)
{
case RID_FUNCTION_NAME:
name = NIC_FUNC_NAME;
break;
case RID_PRETTY_FUNCTION_NAME:
name = NIC_PRETTY_FUNC;
break;
case RID_C99_FUNCTION_NAME:
name = NIC_C99_FUNC;
break;
default:
gcc_unreachable ();
}
if (cp_parser_non_integral_constant_expression (parser, name))
return error_mark_node;
/* Look up the name. */
return finish_fname (token->u.value);
}
case RID_VA_ARG:
{
tree expression;
tree type;
/* The `__builtin_va_arg' construct is used to handle
`va_arg'. Consume the `__builtin_va_arg' token. */
cp_lexer_consume_token (parser->lexer);
/* Look for the opening `('. */
cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
/* Now, parse the assignment-expression. */
expression = cp_parser_assignment_expression (parser,
/*cast_p=*/false, NULL);
/* Look for the `,'. */
cp_parser_require (parser, CPP_COMMA, RT_COMMA);
/* Parse the type-id. */
type = cp_parser_type_id (parser);
/* Look for the closing `)'. */
cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
/* Using `va_arg' in a constant-expression is not
allowed. */
if (cp_parser_non_integral_constant_expression (parser,
NIC_VA_ARG))
return error_mark_node;
return build_x_va_arg (expression, type);
}
case RID_OFFSETOF:
return cp_parser_builtin_offsetof (parser);
case RID_HAS_NOTHROW_ASSIGN:
case RID_HAS_NOTHROW_CONSTRUCTOR:
case RID_HAS_NOTHROW_COPY:
case RID_HAS_TRIVIAL_ASSIGN:
case RID_HAS_TRIVIAL_CONSTRUCTOR:
case RID_HAS_TRIVIAL_COPY:
case RID_HAS_TRIVIAL_DESTRUCTOR:
case RID_HAS_VIRTUAL_DESTRUCTOR:
case RID_IS_ABSTRACT:
case RID_IS_BASE_OF:
case RID_IS_CLASS:
case RID_IS_CONVERTIBLE_TO:
case RID_IS_EMPTY:
case RID_IS_ENUM:
case RID_IS_FINAL:
case RID_IS_LITERAL_TYPE:
case RID_IS_POD:
case RID_IS_POLYMORPHIC:
case RID_IS_STD_LAYOUT:
case RID_IS_TRIVIAL:
case RID_IS_UNION:
return cp_parser_trait_expr (parser, token->keyword);
/* Objective-C++ expressions. */
case RID_AT_ENCODE:
case RID_AT_PROTOCOL:
case RID_AT_SELECTOR:
return cp_parser_objc_expression (parser);
case RID_TEMPLATE:
if (parser->in_function_body
&& (cp_lexer_peek_nth_token (parser->lexer, 2)->type
== CPP_LESS))
{
error_at (token->location,
"a template declaration cannot appear at block scope");
cp_parser_skip_to_end_of_block_or_statement (parser);
return error_mark_node;
}
/* Any other keyword -- and `template' not diagnosed above --
falls through to the generic error. */
default:
cp_parser_error (parser, "expected primary-expression");
return error_mark_node;
}
/* An id-expression can start with either an identifier, a
`::' as the beginning of a qualified-id, or the "operator"
keyword. */
case CPP_NAME:
case CPP_SCOPE:
case CPP_TEMPLATE_ID:
case CPP_NESTED_NAME_SPECIFIER:
{
tree id_expression;
tree decl;
const char *error_msg;
bool template_p;
bool done;
cp_token *id_expr_token;
id_expression:
/* Parse the id-expression. */
id_expression
= cp_parser_id_expression (parser,
/*template_keyword_p=*/false,
/*check_dependency_p=*/true,
&template_p,
/*declarator_p=*/false,
/*optional_p=*/false);
if (id_expression == error_mark_node)
return error_mark_node;
id_expr_token = token;
token = cp_lexer_peek_token (parser->lexer);
/* DONE is true if no postfix operator can follow; it is passed
through to finish_id_expression below. */
done = (token->type != CPP_OPEN_SQUARE
&& token->type != CPP_OPEN_PAREN
&& token->type != CPP_DOT
&& token->type != CPP_DEREF
&& token->type != CPP_PLUS_PLUS
&& token->type != CPP_MINUS_MINUS);
/* If we have a template-id, then no further lookup is
required. If the template-id was for a template-class, we
will sometimes have a TYPE_DECL at this point. */
if (TREE_CODE (id_expression) == TEMPLATE_ID_EXPR
|| TREE_CODE (id_expression) == TYPE_DECL)
decl = id_expression;
/* Look up the name. */
else
{
tree ambiguous_decls;
/* If we already know that this lookup is ambiguous, then
we've already issued an error message; there's no reason
to check again. */
if (id_expr_token->type == CPP_NAME
&& id_expr_token->ambiguous_p)
{
cp_parser_simulate_error (parser);
return error_mark_node;
}
decl = cp_parser_lookup_name (parser, id_expression,
none_type,
template_p,
/*is_namespace=*/false,
/*check_dependency=*/true,
&ambiguous_decls,
id_expr_token->location);
/* If the lookup was ambiguous, an error will already have
been issued. */
if (ambiguous_decls)
return error_mark_node;
/* In Objective-C++, we may have an Objective-C 2.0
dot-syntax for classes here. */
if (c_dialect_objc ()
&& cp_lexer_peek_token (parser->lexer)->type == CPP_DOT
&& TREE_CODE (decl) == TYPE_DECL
&& objc_is_class_name (decl))
{
tree component;
cp_lexer_consume_token (parser->lexer);
component = cp_parser_identifier (parser);
if (component == error_mark_node)
return error_mark_node;
return objc_build_class_component_ref (id_expression, component);
}
/* In Objective-C++, an instance variable (ivar) may be preferred
to whatever cp_parser_lookup_name() found. */
decl = objc_lookup_ivar (decl, id_expression);
/* If name lookup gives us a SCOPE_REF, then the
qualifying scope was dependent. */
if (TREE_CODE (decl) == SCOPE_REF)
{
/* At this point, we do not know if DECL is a valid
integral constant expression. We assume that it is
in fact such an expression, so that code like:
template <int N> struct A {
int a[B<N>::i];
};
is accepted. At template-instantiation time, we
will check that B<N>::i is actually a constant. */
return decl;
}
/* Check to see if DECL is a local variable in a context
where that is forbidden. */
if (parser->local_variables_forbidden_p
&& local_variable_p (decl))
{
/* It might be that we only found DECL because we are
trying to be generous with pre-ISO scoping rules.
For example, consider:
int i;
void g() {
for (int i = 0; i < 10; ++i) {}
extern void f(int j = i);
}
Here, name look up will originally find the out
of scope `i'. We need to issue a warning message,
but then use the global `i'. */
decl = check_for_out_of_scope_variable (decl);
if (local_variable_p (decl))
{
error_at (id_expr_token->location,
"local variable %qD may not appear in this context",
decl);
return error_mark_node;
}
}
}
decl = (finish_id_expression
(id_expression, decl, parser->scope,
idk,
parser->integral_constant_expression_p,
parser->allow_non_integral_constant_expression_p,
&parser->non_integral_constant_expression_p,
template_p, done, address_p,
template_arg_p,
&error_msg,
id_expr_token->location));
if (error_msg)
cp_parser_error (parser, error_msg);
return decl;
}
/* Anything else is an error. */
default:
cp_parser_error (parser, "expected primary-expression");
return error_mark_node;
}
}
/* Parse an id-expression.
id-expression:
unqualified-id
qualified-id
qualified-id:
:: [opt] nested-name-specifier template [opt] unqualified-id
:: identifier
:: operator-function-id
:: template-id
Return a representation of the unqualified portion of the
identifier. Sets PARSER->SCOPE to the qualifying scope if there is
a `::' or nested-name-specifier.
Often, if the id-expression was a qualified-id, the caller will
want to make a SCOPE_REF to represent the qualified-id. This
function does not do this in order to avoid wastefully creating
SCOPE_REFs when they are not required.
If TEMPLATE_KEYWORD_P is true, then we have just seen the
`template' keyword.
If CHECK_DEPENDENCY_P is false, then names are looked up inside
uninstantiated templates.
If *TEMPLATE_P is non-NULL, it is set to true iff the
`template' keyword is used to explicitly indicate that the entity
named is a template.
If DECLARATOR_P is true, the id-expression is appearing as part of
a declarator, rather than as part of an expression. */
static tree
cp_parser_id_expression (cp_parser *parser,
			 bool template_keyword_p,
			 bool check_dependency_p,
			 bool *template_p,
			 bool declarator_p,
			 bool optional_p)
{
  bool saw_global_scope;
  bool saw_nested_name;

  /* Until we see otherwise, the `template' keyword status is
     whatever the caller told us.  */
  if (template_p)
    *template_p = template_keyword_p;

  /* An optional leading `::' ...  */
  saw_global_scope
    = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* ... followed by an optional nested-name-specifier.  */
  saw_nested_name
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    check_dependency_p,
					    /*type_p=*/false,
					    declarator_p)
       != NULL_TREE);

  if (saw_nested_name)
    {
      /* This is the first qualified-id production.  */
      tree entry_scope;
      tree entry_object_scope;
      tree entry_qualifying_scope;
      tree unqualified_id;
      bool is_template;

      /* An explicit `template' keyword may follow the
	 nested-name-specifier.  */
      if (!template_p)
	template_p = &is_template;
      *template_p = cp_parser_optional_template_keyword (parser);

      /* Name lookup done while parsing the unqualified-id may
	 clobber the parser's scope fields; save them so we can
	 restore them for our caller.  */
      entry_scope = parser->scope;
      entry_object_scope = parser->object_scope;
      entry_qualifying_scope = parser->qualifying_scope;

      unqualified_id = cp_parser_unqualified_id (parser, *template_p,
						 check_dependency_p,
						 declarator_p,
						 /*optional_p=*/false);

      parser->scope = entry_scope;
      parser->object_scope = entry_object_scope;
      parser->qualifying_scope = entry_qualifying_scope;
      return unqualified_id;
    }
  else if (saw_global_scope)
    {
      /* One of the remaining `::'-prefixed qualified-id
	 productions.  */
      cp_token *tok;
      tree tid;

      tok = cp_lexer_peek_token (parser->lexer);

      /* Fast path: a plain identifier not followed by a
	 template-argument-list cannot be a template-id.  */
      if (tok->type == CPP_NAME
	  && !cp_parser_nth_token_starts_template_argument_list_p
	       (parser, 2))
	return cp_parser_identifier (parser);

      /* A template-id is still possible; try it tentatively.  */
      cp_parser_parse_tentatively (parser);
      tid = cp_parser_template_id (parser,
				   /*template_keyword_p=*/false,
				   /*check_dependency_p=*/true,
				   declarator_p);
      if (cp_parser_parse_definitely (parser))
	return tid;

      /* Peek again: the tentative parse may have invalidated the
	 token pointer obtained above.  */
      tok = cp_lexer_peek_token (parser->lexer);
      if (tok->type == CPP_NAME)
	return cp_parser_identifier (parser);
      if (tok->type == CPP_KEYWORD && tok->keyword == RID_OPERATOR)
	return cp_parser_operator_function_id (parser);

      cp_parser_error (parser, "expected id-expression");
      return error_mark_node;
    }
  else
    /* Not qualified at all: just an unqualified-id.  */
    return cp_parser_unqualified_id (parser, template_keyword_p,
				     /*check_dependency_p=*/true,
				     declarator_p,
				     optional_p);
}
/* Parse an unqualified-id.
unqualified-id:
identifier
operator-function-id
conversion-function-id
~ class-name
template-id
If TEMPLATE_KEYWORD_P is TRUE, we have just seen the `template'
keyword, in a construct like `A::template ...'.
Returns a representation of unqualified-id. For the `identifier'
production, an IDENTIFIER_NODE is returned. For the `~ class-name'
production a BIT_NOT_EXPR is returned; the operand of the
BIT_NOT_EXPR is an IDENTIFIER_NODE for the class-name. For the
other productions, see the documentation accompanying the
corresponding parsing functions. If CHECK_DEPENDENCY_P is false,
names are looked up in uninstantiated templates. If DECLARATOR_P
is true, the unqualified-id is appearing as part of a declarator,
rather than as part of an expression. */
static tree
cp_parser_unqualified_id (cp_parser* parser,
bool template_keyword_p,
bool check_dependency_p,
bool declarator_p,
bool optional_p)
{
cp_token *token;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
switch (token->type)
{
case CPP_NAME:
{
tree id;
/* We don't know yet whether or not this will be a
template-id. */
cp_parser_parse_tentatively (parser);
/* Try a template-id. */
id = cp_parser_template_id (parser, template_keyword_p,
check_dependency_p,
declarator_p);
/* If it worked, we're done. */
if (cp_parser_parse_definitely (parser))
return id;
/* Otherwise, it's an ordinary identifier. */
return cp_parser_identifier (parser);
}
/* A template-id token: let the template-id parser handle it. */
case CPP_TEMPLATE_ID:
return cp_parser_template_id (parser, template_keyword_p,
check_dependency_p,
declarator_p);
/* "~ class-name": a destructor name. */
case CPP_COMPL:
{
tree type_decl;
tree qualifying_scope;
tree object_scope;
tree scope;
bool done;
/* Consume the `~' token. */
cp_lexer_consume_token (parser->lexer);
/* Parse the class-name. The standard, as written, seems to
say that:
template <typename T> struct S { ~S (); };
template <typename T> S<T>::~S() {}
is invalid, since `~' must be followed by a class-name, but
`S<T>' is dependent, and so not known to be a class.
That's not right; we need to look in uninstantiated
templates. A further complication arises from:
template <typename T> void f(T t) {
t.T::~T();
}
Here, it is not possible to look up `T' in the scope of `T'
itself. We must look in both the current scope, and the
scope of the containing complete expression.
Yet another issue is:
struct S {
int S;
~S();
};
S::~S() {}
The standard does not seem to say that the `S' in `~S'
should refer to the type `S' and not the data member
`S::S'. */
/* DR 244 says that we look up the name after the "~" in the
same scope as we looked up the qualifying name. That idea
isn't fully worked out; it's more complicated than that. */
scope = parser->scope;
object_scope = parser->object_scope;
qualifying_scope = parser->qualifying_scope;
/* Check for invalid scopes. */
if (scope == error_mark_node)
{
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
cp_lexer_consume_token (parser->lexer);
return error_mark_node;
}
if (scope && TREE_CODE (scope) == NAMESPACE_DECL)
{
if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
error_at (token->location,
"scope %qT before %<~%> is not a class-name",
scope);
cp_parser_simulate_error (parser);
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
cp_lexer_consume_token (parser->lexer);
return error_mark_node;
}
gcc_assert (!scope || TYPE_P (scope));
/* If the name is of the form "X::~X" it's OK even if X is a
typedef. */
token = cp_lexer_peek_token (parser->lexer);
if (scope
&& token->type == CPP_NAME
&& (cp_lexer_peek_nth_token (parser->lexer, 2)->type
!= CPP_LESS)
&& (token->u.value == TYPE_IDENTIFIER (scope)
|| (CLASS_TYPE_P (scope)
&& constructor_name_p (token->u.value, scope))))
{
cp_lexer_consume_token (parser->lexer);
return build_nt (BIT_NOT_EXPR, scope);
}
/* If there was an explicit qualification (S::~T), first look
in the scope given by the qualification (i.e., S).
Note: in the calls to cp_parser_class_name below we pass
typename_type so that lookup finds the injected-class-name
rather than the constructor. */
done = false;
type_decl = NULL_TREE;
if (scope)
{
cp_parser_parse_tentatively (parser);
type_decl = cp_parser_class_name (parser,
/*typename_keyword_p=*/false,
/*template_keyword_p=*/false,
typename_type,
/*check_dependency=*/false,
/*class_head_p=*/false,
declarator_p);
if (cp_parser_parse_definitely (parser))
done = true;
}
/* In "N::S::~S", look in "N" as well. */
if (!done && scope && qualifying_scope)
{
cp_parser_parse_tentatively (parser);
parser->scope = qualifying_scope;
parser->object_scope = NULL_TREE;
parser->qualifying_scope = NULL_TREE;
type_decl
= cp_parser_class_name (parser,
/*typename_keyword_p=*/false,
/*template_keyword_p=*/false,
typename_type,
/*check_dependency=*/false,
/*class_head_p=*/false,
declarator_p);
if (cp_parser_parse_definitely (parser))
done = true;
}
/* In "p->S::~T", look in the scope given by "*p" as well. */
else if (!done && object_scope)
{
cp_parser_parse_tentatively (parser);
parser->scope = object_scope;
parser->object_scope = NULL_TREE;
parser->qualifying_scope = NULL_TREE;
type_decl
= cp_parser_class_name (parser,
/*typename_keyword_p=*/false,
/*template_keyword_p=*/false,
typename_type,
/*check_dependency=*/false,
/*class_head_p=*/false,
declarator_p);
if (cp_parser_parse_definitely (parser))
done = true;
}
/* Look in the surrounding context. */
if (!done)
{
parser->scope = NULL_TREE;
parser->object_scope = NULL_TREE;
parser->qualifying_scope = NULL_TREE;
if (processing_template_decl)
cp_parser_parse_tentatively (parser);
type_decl
= cp_parser_class_name (parser,
/*typename_keyword_p=*/false,
/*template_keyword_p=*/false,
typename_type,
/*check_dependency=*/false,
/*class_head_p=*/false,
declarator_p);
if (processing_template_decl
&& ! cp_parser_parse_definitely (parser))
{
/* We couldn't find a type with this name, so just accept
it and check for a match at instantiation time. */
type_decl = cp_parser_identifier (parser);
if (type_decl != error_mark_node)
type_decl = build_nt (BIT_NOT_EXPR, type_decl);
return type_decl;
}
}
/* If an error occurred, assume that the name of the
destructor is the same as the name of the qualifying
class. That allows us to keep parsing after running
into ill-formed destructor names. */
if (type_decl == error_mark_node && scope)
return build_nt (BIT_NOT_EXPR, scope);
else if (type_decl == error_mark_node)
return error_mark_node;
/* Check that destructor name and scope match. */
if (declarator_p && scope && !check_dtor_name (scope, type_decl))
{
if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
error_at (token->location,
"declaration of %<~%T%> as member of %qT",
type_decl, scope);
cp_parser_simulate_error (parser);
return error_mark_node;
}
/* [class.dtor]
A typedef-name that names a class shall not be used as the
identifier in the declarator for a destructor declaration. */
if (declarator_p
&& !DECL_IMPLICIT_TYPEDEF_P (type_decl)
&& !DECL_SELF_REFERENCE_P (type_decl)
&& !cp_parser_uncommitted_to_tentative_parse_p (parser))
error_at (token->location,
"typedef-name %qD used as destructor declarator",
type_decl);
/* Represent the destructor name as ~type; note that the TYPE of
the TYPE_DECL, not the TYPE_DECL itself, is used. */
return build_nt (BIT_NOT_EXPR, TREE_TYPE (type_decl));
}
case CPP_KEYWORD:
if (token->keyword == RID_OPERATOR)
{
tree id;
/* This could be a template-id, so we try that first. */
cp_parser_parse_tentatively (parser);
/* Try a template-id. */
id = cp_parser_template_id (parser, template_keyword_p,
/*check_dependency_p=*/true,
declarator_p);
/* If that worked, we're done. */
if (cp_parser_parse_definitely (parser))
return id;
/* We still don't know whether we're looking at an
operator-function-id or a conversion-function-id. */
cp_parser_parse_tentatively (parser);
/* Try an operator-function-id. */
id = cp_parser_operator_function_id (parser);
/* If that didn't work, try a conversion-function-id. */
if (!cp_parser_parse_definitely (parser))
id = cp_parser_conversion_function_id (parser);
else if (UDLIT_OPER_P (id))
{
/* 17.6.3.3.5 */
const char *name = UDLIT_OP_SUFFIX (id);
if (name[0] != '_' && !in_system_header)
warning (0, "literal operator suffixes not preceded by %<_%>"
" are reserved for future standardization");
}
return id;
}
/* Fall through. */
default:
if (optional_p)
return NULL_TREE;
cp_parser_error (parser, "expected unqualified-id");
return error_mark_node;
}
}
/* Parse an (optional) nested-name-specifier.
nested-name-specifier: [C++98]
class-or-namespace-name :: nested-name-specifier [opt]
class-or-namespace-name :: template nested-name-specifier [opt]
nested-name-specifier: [C++0x]
type-name ::
namespace-name ::
nested-name-specifier identifier ::
nested-name-specifier template [opt] simple-template-id ::
PARSER->SCOPE should be set appropriately before this function is
called. TYPENAME_KEYWORD_P is TRUE if the `typename' keyword is in
effect. TYPE_P is TRUE if we non-type bindings should be ignored
in name lookups.
Sets PARSER->SCOPE to the class (TYPE) or namespace
(NAMESPACE_DECL) specified by the nested-name-specifier, or leaves
it unchanged if there is no nested-name-specifier. Returns the new
scope iff there is a nested-name-specifier, or NULL_TREE otherwise.
If IS_DECLARATION is TRUE, the nested-name-specifier is known to be
part of a declaration and/or decl-specifier. */
static tree
cp_parser_nested_name_specifier_opt (cp_parser *parser,
				     bool typename_keyword_p,
				     bool check_dependency_p,
				     bool type_p,
				     bool is_declaration)
{
  /* Becomes true once at least one `qualifying-entity ::' component
     has been recognized; governs both the return value and the token
     caching below.  */
  bool success = false;
  /* START doubles as a flag: zero means we are not positioned to
     replace the parsed tokens with a CPP_NESTED_NAME_SPECIFIER.  */
  cp_token_position start = 0;
  cp_token *token;

  /* Remember where the nested-name-specifier starts.  */
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      start = cp_lexer_token_position (parser->lexer, false);
      push_deferring_access_checks (dk_deferred);
    }

  /* Each iteration of this loop parses one `qualifying-entity ::'
     component of the nested-name-specifier.  */
  while (true)
    {
      tree new_scope;
      tree old_scope;
      tree saved_qualifying_scope;
      bool template_keyword_p;

      /* Spot cases that cannot be the beginning of a
	 nested-name-specifier.  */
      token = cp_lexer_peek_token (parser->lexer);

      /* If the next token is CPP_NESTED_NAME_SPECIFIER, just process
	 the already parsed nested-name-specifier.  */
      if (token->type == CPP_NESTED_NAME_SPECIFIER)
	{
	  /* Grab the nested-name-specifier and continue the loop.  */
	  cp_parser_pre_parsed_nested_name_specifier (parser);
	  /* If we originally encountered this nested-name-specifier
	     with IS_DECLARATION set to false, we will not have
	     resolved TYPENAME_TYPEs, so we must do so here.  */
	  if (is_declaration
	      && TREE_CODE (parser->scope) == TYPENAME_TYPE)
	    {
	      new_scope = resolve_typename_type (parser->scope,
						 /*only_current_p=*/false);
	      if (TREE_CODE (new_scope) != TYPENAME_TYPE)
		parser->scope = new_scope;
	    }
	  success = true;
	  continue;
	}

      /* Spot cases that cannot be the beginning of a
	 nested-name-specifier.  On the second and subsequent times
	 through the loop, we look for the `template' keyword.  */
      if (success && token->keyword == RID_TEMPLATE)
	;
      /* A template-id can start a nested-name-specifier.  */
      else if (token->type == CPP_TEMPLATE_ID)
	;
      /* DR 743: decltype can be used in a nested-name-specifier.  */
      else if (token_is_decltype (token))
	;
      else
	{
	  /* If the next token is not an identifier, then it is
	     definitely not a type-name or namespace-name.  */
	  if (token->type != CPP_NAME)
	    break;
	  /* If the following token is neither a `<' (to begin a
	     template-id), nor a `::', then we are not looking at a
	     nested-name-specifier.  */
	  token = cp_lexer_peek_nth_token (parser->lexer, 2);

	  /* Error recovery: the user wrote `A:B' where `A::B' was
	     meant; diagnose it and repair the token in place so
	     parsing can proceed.  */
	  if (token->type == CPP_COLON
	      && parser->colon_corrects_to_scope_p
	      && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_NAME)
	    {
	      error_at (token->location,
			"found %<:%> in nested-name-specifier, expected %<::%>");
	      token->type = CPP_SCOPE;
	    }

	  if (token->type != CPP_SCOPE
	      && !cp_parser_nth_token_starts_template_argument_list_p
		  (parser, 2))
	    break;
	}

      /* The nested-name-specifier is optional, so we parse
	 tentatively.  */
      cp_parser_parse_tentatively (parser);

      /* Look for the optional `template' keyword, if this isn't the
	 first time through the loop.  */
      if (success)
	template_keyword_p = cp_parser_optional_template_keyword (parser);
      else
	template_keyword_p = false;

      /* Save the old scope since the name lookup we are about to do
	 might destroy it.  */
      old_scope = parser->scope;
      saved_qualifying_scope = parser->qualifying_scope;
      /* In a declarator-id like "X<T>::I::Y<T>" we must be able to
	 look up names in "X<T>::I" in order to determine that "Y" is
	 a template.  So, if we have a typename at this point, we make
	 an effort to look through it.  */
      if (is_declaration
	  && !typename_keyword_p
	  && parser->scope
	  && TREE_CODE (parser->scope) == TYPENAME_TYPE)
	parser->scope = resolve_typename_type (parser->scope,
					       /*only_current_p=*/false);
      /* Parse the qualifying entity.  */
      new_scope
	= cp_parser_qualifying_entity (parser,
				       typename_keyword_p,
				       template_keyword_p,
				       check_dependency_p,
				       type_p,
				       is_declaration);
      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, RT_SCOPE);

      /* If we found what we wanted, we keep going; otherwise, we're
	 done.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  bool error_p = false;

	  /* Restore the OLD_SCOPE since it was valid before the
	     failed attempt at finding the last
	     class-or-namespace-name.  */
	  parser->scope = old_scope;
	  parser->qualifying_scope = saved_qualifying_scope;

	  /* If the next token is a decltype, and the one after that is a
	     `::', then the decltype has failed to resolve to a class or
	     enumeration type.  Give this error even when parsing
	     tentatively since it can't possibly be valid--and we're going
	     to replace it with a CPP_NESTED_NAME_SPECIFIER below, so we
	     won't get another chance.*/
	  if (cp_lexer_next_token_is (parser->lexer, CPP_DECLTYPE)
	      && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_SCOPE))
	    {
	      token = cp_lexer_consume_token (parser->lexer);
	      error_at (token->location, "decltype evaluates to %qT, "
			"which is not a class or enumeration type",
			token->u.value);
	      parser->scope = error_mark_node;
	      error_p = true;
	      /* As below.  */
	      success = true;
	      cp_lexer_consume_token (parser->lexer);
	    }

	  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
	    break;
	  /* If the next token is an identifier, and the one after
	     that is a `::', then any valid interpretation would have
	     found a class-or-namespace-name.  */
	  while (cp_lexer_next_token_is (parser->lexer, CPP_NAME)
		 && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		     == CPP_SCOPE)
		 && (cp_lexer_peek_nth_token (parser->lexer, 3)->type
		     != CPP_COMPL))
	    {
	      token = cp_lexer_consume_token (parser->lexer);
	      if (!error_p)
		{
		  if (!token->ambiguous_p)
		    {
		      tree decl;
		      tree ambiguous_decls;
		      /* Redo the lookup non-tentatively, purely so we
			 can say exactly why the name did not work as
			 a qualifying entity.  */
		      decl = cp_parser_lookup_name (parser, token->u.value,
						    none_type,
						    /*is_template=*/false,
						    /*is_namespace=*/false,
						    /*check_dependency=*/true,
						    &ambiguous_decls,
						    token->location);
		      if (TREE_CODE (decl) == TEMPLATE_DECL)
			error_at (token->location,
				  "%qD used without template parameters",
				  decl);
		      else if (ambiguous_decls)
			{
			  error_at (token->location,
				    "reference to %qD is ambiguous",
				    token->u.value);
			  print_candidates (ambiguous_decls);
			  decl = error_mark_node;
			}
		      else
			{
			  if (cxx_dialect != cxx98)
			    cp_parser_name_lookup_error
			      (parser, token->u.value, decl, NLE_NOT_CXX98,
			       token->location);
			  else
			    cp_parser_name_lookup_error
			      (parser, token->u.value, decl, NLE_CXX98,
			       token->location);
			}
		    }
		  parser->scope = error_mark_node;
		  error_p = true;
		  /* Treat this as a successful nested-name-specifier
		     due to:

		     [basic.lookup.qual]

		     If the name found is not a class-name (clause
		     _class_) or namespace-name (_namespace.def_), the
		     program is ill-formed.  */
		  success = true;
		}
	      cp_lexer_consume_token (parser->lexer);
	    }
	  break;
	}
      /* We've found one valid nested-name-specifier.  */
      success = true;

      /* Name lookup always gives us a DECL.  */
      if (TREE_CODE (new_scope) == TYPE_DECL)
	new_scope = TREE_TYPE (new_scope);
      /* Uses of "template" must be followed by actual templates.  */
      if (template_keyword_p
	  && !(CLASS_TYPE_P (new_scope)
	       && ((CLASSTYPE_USE_TEMPLATE (new_scope)
		    && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (new_scope)))
		   || CLASSTYPE_IS_TEMPLATE (new_scope)))
	  && !(TREE_CODE (new_scope) == TYPENAME_TYPE
	       && (TREE_CODE (TYPENAME_TYPE_FULLNAME (new_scope))
		   == TEMPLATE_ID_EXPR)))
	permerror (input_location, TYPE_P (new_scope)
		   ? G_("%qT is not a template")
		   : G_("%qD is not a template"),
		   new_scope);
      /* If it is a class scope, try to complete it; we are about to
	 be looking up names inside the class.  */
      if (TYPE_P (new_scope)
	  /* Since checking types for dependency can be expensive,
	     avoid doing it if the type is already complete.  */
	  && !COMPLETE_TYPE_P (new_scope)
	  /* Do not try to complete dependent types.  */
	  && !dependent_type_p (new_scope))
	{
	  new_scope = complete_type (new_scope);
	  /* If it is a typedef to current class, use the current
	     class instead, as the typedef won't have any names inside
	     it yet.  */
	  if (!COMPLETE_TYPE_P (new_scope)
	      && currently_open_class (new_scope))
	    new_scope = TYPE_MAIN_VARIANT (new_scope);
	}
      /* Make sure we look in the right scope the next time through
	 the loop.  */
      parser->scope = new_scope;
    }

  /* If parsing tentatively, replace the sequence of tokens that makes
     up the nested-name-specifier with a CPP_NESTED_NAME_SPECIFIER
     token.  That way, should we re-parse the token stream, we will
     not have to repeat the effort required to do the parse, nor will
     we issue duplicate error messages.  */
  if (success && start)
    {
      cp_token *token;

      token = cp_lexer_token_at (parser->lexer, start);
      /* Reset the contents of the START token.  */
      token->type = CPP_NESTED_NAME_SPECIFIER;
      /* Retrieve any deferred checks.  Do not pop this access checks yet
	 so the memory will not be reclaimed during token replacing below.  */
      token->u.tree_check_value = ggc_alloc_cleared_tree_check ();
      token->u.tree_check_value->value = parser->scope;
      token->u.tree_check_value->checks = get_deferred_access_checks ();
      token->u.tree_check_value->qualifying_scope =
	parser->qualifying_scope;
      token->keyword = RID_MAX;

      /* Purge all subsequent tokens.  */
      cp_lexer_purge_tokens_after (parser->lexer, start);
    }

  if (start)
    pop_to_parent_deferring_access_checks ();

  /* Return the scope we finished in (possibly error_mark_node), or
     NULL_TREE if no nested-name-specifier was present.  */
  return success ? parser->scope : NULL_TREE;
}
/* Parse a nested-name-specifier.  See
   cp_parser_nested_name_specifier_opt for details.  This function
   behaves identically, except that it will issue an error if no
   nested-name-specifier is present.  */
static tree
cp_parser_nested_name_specifier (cp_parser *parser,
				 bool typename_keyword_p,
				 bool check_dependency_p,
				 bool type_p,
				 bool is_declaration)
{
  /* Delegate the real work to the _opt variant.  */
  tree result = cp_parser_nested_name_specifier_opt (parser,
						     typename_keyword_p,
						     check_dependency_p,
						     type_p,
						     is_declaration);

  /* Unlike the _opt variant, a missing nested-name-specifier is an
     error here; diagnose it and clear the scope.  */
  if (result == NULL_TREE)
    {
      cp_parser_error (parser, "expected nested-name-specifier");
      parser->scope = NULL_TREE;
    }

  return result;
}
/* Parse the qualifying entity in a nested-name-specifier. For C++98,
this is either a class-name or a namespace-name (which corresponds
to the class-or-namespace-name production in the grammar). For
C++0x, it can also be a type-name that refers to an enumeration
type or a simple-template-id.
TYPENAME_KEYWORD_P is TRUE iff the `typename' keyword is in effect.
TEMPLATE_KEYWORD_P is TRUE iff the `template' keyword is in effect.
CHECK_DEPENDENCY_P is FALSE iff dependent names should be looked up.
   TYPE_P is TRUE iff the next name should be taken as a class-name,
   even if the same name is declared to be another entity in the same
   scope.
Returns the class (TYPE_DECL) or namespace (NAMESPACE_DECL)
specified by the class-or-namespace-name. If neither is found the
ERROR_MARK_NODE is returned. */
static tree
cp_parser_qualifying_entity (cp_parser *parser,
			     bool typename_keyword_p,
			     bool template_keyword_p,
			     bool check_dependency_p,
			     bool type_p,
			     bool is_declaration)
{
  tree saved_scope;
  tree saved_qualifying_scope;
  tree saved_object_scope;
  tree scope;
  bool only_class_p;
  bool successful_parse_p;

  /* DR 743: decltype can appear in a nested-name-specifier.  */
  if (cp_lexer_next_token_is_decltype (parser->lexer))
    {
      scope = cp_parser_decltype (parser);
      if (TREE_CODE (scope) != ENUMERAL_TYPE
	  && !MAYBE_CLASS_TYPE_P (scope))
	{
	  /* The decltype did not yield a class or enumeration type,
	     so it cannot qualify a name; record the failure for the
	     tentative-parse machinery.  */
	  cp_parser_simulate_error (parser);
	  return error_mark_node;
	}
      if (TYPE_NAME (scope))
	scope = TYPE_NAME (scope);
      return scope;
    }

  /* Before we try to parse the class-name, we must save away the
     current PARSER->SCOPE since cp_parser_class_name will destroy
     it.  */
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;
  /* Try for a class-name first.  If the SAVED_SCOPE is a type, then
     there is no need to look for a namespace-name.  */
  only_class_p = template_keyword_p
    || (saved_scope && TYPE_P (saved_scope) && cxx_dialect == cxx98);
  if (!only_class_p)
    cp_parser_parse_tentatively (parser);
  scope = cp_parser_class_name (parser,
				typename_keyword_p,
				template_keyword_p,
				type_p ? class_type : none_type,
				check_dependency_p,
				/*class_head_p=*/false,
				is_declaration);
  successful_parse_p = only_class_p || cp_parser_parse_definitely (parser);
  /* If that didn't work and we're in C++0x mode, try for a type-name.  */
  if (!only_class_p
      && cxx_dialect != cxx98
      && !successful_parse_p)
    {
      /* Restore the saved scope.  */
      parser->scope = saved_scope;
      parser->qualifying_scope = saved_qualifying_scope;
      parser->object_scope = saved_object_scope;

      /* Parse tentatively.  */
      cp_parser_parse_tentatively (parser);

      /* Parse a type-name */
      scope = cp_parser_type_name (parser);

      /* "If the name found does not designate a namespace or a class,
	 enumeration, or dependent type, the program is ill-formed."

	 We cover classes and dependent types above and namespaces below,
	 so this code is only looking for enums.  */
      if (!scope || TREE_CODE (scope) != TYPE_DECL
	  || TREE_CODE (TREE_TYPE (scope)) != ENUMERAL_TYPE)
	cp_parser_simulate_error (parser);

      successful_parse_p = cp_parser_parse_definitely (parser);
    }
  /* If that didn't work, try for a namespace-name.  */
  if (!only_class_p && !successful_parse_p)
    {
      /* Restore the saved scope.  */
      parser->scope = saved_scope;
      parser->qualifying_scope = saved_qualifying_scope;
      parser->object_scope = saved_object_scope;
      /* If we are not looking at an identifier followed by the scope
	 resolution operator, then this is not part of a
	 nested-name-specifier.  (Note that this function is only used
	 to parse the components of a nested-name-specifier.)  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME)
	  || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE)
	return error_mark_node;
      scope = cp_parser_namespace_name (parser);
    }

  return scope;
}
/* Parse a postfix-expression.
postfix-expression:
primary-expression
postfix-expression [ expression ]
postfix-expression ( expression-list [opt] )
simple-type-specifier ( expression-list [opt] )
typename :: [opt] nested-name-specifier identifier
( expression-list [opt] )
typename :: [opt] nested-name-specifier template [opt] template-id
( expression-list [opt] )
postfix-expression . template [opt] id-expression
postfix-expression -> template [opt] id-expression
postfix-expression . pseudo-destructor-name
postfix-expression -> pseudo-destructor-name
postfix-expression ++
postfix-expression --
dynamic_cast < type-id > ( expression )
static_cast < type-id > ( expression )
reinterpret_cast < type-id > ( expression )
const_cast < type-id > ( expression )
typeid ( expression )
typeid ( type-id )
GNU Extension:
postfix-expression:
( type-id ) { initializer-list , [opt] }
This extension is a GNU version of the C99 compound-literal
construct. (The C99 grammar uses `type-name' instead of `type-id',
but they are essentially the same concept.)
If ADDRESS_P is true, the postfix expression is the operand of the
`&' operator. CAST_P is true if this expression is the target of a
cast.
If MEMBER_ACCESS_ONLY_P, we only allow postfix expressions that are
class member access expressions [expr.ref].
Returns a representation of the expression. */
static tree
cp_parser_postfix_expression (cp_parser *parser, bool address_p, bool cast_p,
			      bool member_access_only_p,
			      cp_id_kind * pidk_return)
{
  cp_token *token;
  enum rid keyword;
  cp_id_kind idk = CP_ID_KIND_NONE;
  tree postfix_expression = NULL_TREE;
  bool is_member_access = false;

  /* First parse the "head" of the postfix-expression (a cast, typeid,
     functional cast, compound literal, or primary-expression); the
     postfix suffixes ([], (), ., ->, ++, --) are handled by the loop
     that follows.  */
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Some of the productions are determined by keywords.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_DYNCAST:
    case RID_STATCAST:
    case RID_REINTCAST:
    case RID_CONSTCAST:
      {
	tree type;
	tree expression;
	const char *saved_message;

	/* All of these can be handled in the same way from the point
	   of view of parsing.  Begin by consuming the token
	   identifying the cast.  */
	cp_lexer_consume_token (parser->lexer);
	/* New types cannot be defined in the cast.  */
	saved_message = parser->type_definition_forbidden_message;
	parser->type_definition_forbidden_message
	  = G_("types may not be defined in casts");
	/* Look for the opening `<'.  */
	cp_parser_require (parser, CPP_LESS, RT_LESS);
	/* Parse the type to which we are casting.  */
	type = cp_parser_type_id (parser);
	/* Look for the closing `>'.  */
	cp_parser_require (parser, CPP_GREATER, RT_GREATER);
	/* Restore the old message.  */
	parser->type_definition_forbidden_message = saved_message;

	/* And the expression which is being cast.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	expression = cp_parser_expression (parser, /*cast_p=*/true, & idk);
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

	/* Only type conversions to integral or enumeration types
	   can be used in constant-expressions.  */
	if (!cast_valid_in_integral_constant_expression_p (type)
	    && cp_parser_non_integral_constant_expression (parser, NIC_CAST))
	  return error_mark_node;

	/* Build the tree for whichever of the four named casts was
	   parsed.  */
	switch (keyword)
	  {
	  case RID_DYNCAST:
	    postfix_expression
	      = build_dynamic_cast (type, expression, tf_warning_or_error);
	    break;
	  case RID_STATCAST:
	    postfix_expression
	      = build_static_cast (type, expression, tf_warning_or_error);
	    break;
	  case RID_REINTCAST:
	    postfix_expression
	      = build_reinterpret_cast (type, expression,
					tf_warning_or_error);
	    break;
	  case RID_CONSTCAST:
	    postfix_expression
	      = build_const_cast (type, expression, tf_warning_or_error);
	    break;
	  default:
	    gcc_unreachable ();
	  }
      }
      break;

    case RID_TYPEID:
      {
	tree type;
	const char *saved_message;
	bool saved_in_type_id_in_expr_p;

	/* Consume the `typeid' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Look for the `(' token.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	/* Types cannot be defined in a `typeid' expression.  */
	saved_message = parser->type_definition_forbidden_message;
	parser->type_definition_forbidden_message
	  = G_("types may not be defined in a %<typeid%> expression");
	/* We can't be sure yet whether we're looking at a type-id or an
	   expression.  */
	cp_parser_parse_tentatively (parser);
	/* Try a type-id first.  */
	saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	parser->in_type_id_in_expr_p = true;
	type = cp_parser_type_id (parser);
	parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	/* Look for the `)' token.  Otherwise, we can't be sure that
	   we're not looking at an expression: consider `typeid (int
	   (3))', for example.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	/* If all went well, simply lookup the type-id.  */
	if (cp_parser_parse_definitely (parser))
	  postfix_expression = get_typeid (type);
	/* Otherwise, fall back to the expression variant.  */
	else
	  {
	    tree expression;

	    /* Look for an expression.  */
	    expression = cp_parser_expression (parser, /*cast_p=*/false, & idk);
	    /* Compute its typeid.  */
	    postfix_expression = build_typeid (expression);
	    /* Look for the `)' token.  */
	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  }
	/* Restore the saved message.  */
	parser->type_definition_forbidden_message = saved_message;
	/* `typeid' may not appear in an integral constant expression.  */
	if (cp_parser_non_integral_constant_expression (parser, NIC_TYPEID))
	  return error_mark_node;
      }
      break;

    case RID_TYPENAME:
      {
	tree type;
	/* The syntax permitted here is the same permitted for an
	   elaborated-type-specifier.  */
	type = cp_parser_elaborated_type_specifier (parser,
						    /*is_friend=*/false,
						    /*is_declaration=*/false);
	postfix_expression = cp_parser_functional_cast (parser, type);
      }
      break;

    default:
      {
	tree type;

	/* If the next thing is a simple-type-specifier, we may be
	   looking at a functional cast.  We could also be looking at
	   an id-expression.  So, we try the functional cast, and if
	   that doesn't work we fall back to the primary-expression.  */
	cp_parser_parse_tentatively (parser);
	/* Look for the simple-type-specifier.  */
	type = cp_parser_simple_type_specifier (parser,
						/*decl_specs=*/NULL,
						CP_PARSER_FLAGS_NONE);
	/* Parse the cast itself.  */
	if (!cp_parser_error_occurred (parser))
	  postfix_expression
	    = cp_parser_functional_cast (parser, type);
	/* If that worked, we're done.  */
	if (cp_parser_parse_definitely (parser))
	  break;

	/* If the functional-cast didn't work out, try a
	   compound-literal.  */
	if (cp_parser_allow_gnu_extensions_p (parser)
	    && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
	  {
	    VEC(constructor_elt,gc) *initializer_list = NULL;
	    bool saved_in_type_id_in_expr_p;

	    cp_parser_parse_tentatively (parser);
	    /* Consume the `('.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the type.  */
	    saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	    parser->in_type_id_in_expr_p = true;
	    type = cp_parser_type_id (parser);
	    parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	    /* Look for the `)'.  */
	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	    /* Look for the `{'.  */
	    cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE);
	    /* If things aren't going well, there's no need to
	       keep going.  */
	    if (!cp_parser_error_occurred (parser))
	      {
		bool non_constant_p;
		/* Parse the initializer-list.  */
		initializer_list
		  = cp_parser_initializer_list (parser, &non_constant_p);
		/* Allow a trailing `,'.  */
		if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
		  cp_lexer_consume_token (parser->lexer);
		/* Look for the final `}'.  */
		cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
	      }
	    /* If that worked, we're definitely looking at a
	       compound-literal expression.  */
	    if (cp_parser_parse_definitely (parser))
	      {
		/* Warn the user that a compound literal is not
		   allowed in standard C++.  */
		pedwarn (input_location, OPT_pedantic, "ISO C++ forbids compound-literals");
		/* For simplicity, we disallow compound literals in
		   constant-expressions.  We could
		   allow compound literals of integer type, whose
		   initializer was a constant, in constant
		   expressions.  Permitting that usage, as a further
		   extension, would not change the meaning of any
		   currently accepted programs.  (Of course, as
		   compound literals are not part of ISO C++, the
		   standard has nothing to say.)  */
		if (cp_parser_non_integral_constant_expression (parser,
								NIC_NCC))
		  {
		    postfix_expression = error_mark_node;
		    break;
		  }
		/* Form the representation of the compound-literal.  */
		postfix_expression
		  = (finish_compound_literal
		     (type, build_constructor (init_list_type_node,
					       initializer_list),
		      tf_warning_or_error));
		break;
	      }
	  }

	/* It must be a primary-expression.  */
	postfix_expression
	  = cp_parser_primary_expression (parser, address_p, cast_p,
					  /*template_arg_p=*/false,
					  &idk);
      }
      break;
    }

  /* Keep looping until the postfix-expression is complete.  */
  while (true)
    {
      if (idk == CP_ID_KIND_UNQUALIFIED
	  && TREE_CODE (postfix_expression) == IDENTIFIER_NODE
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
	/* It is not a Koenig lookup function call.  */
	postfix_expression
	  = unqualified_name_lookup_error (postfix_expression);

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_OPEN_SQUARE:
	  postfix_expression
	    = cp_parser_postfix_open_square_expression (parser,
							postfix_expression,
							false);
	  idk = CP_ID_KIND_NONE;
	  is_member_access = false;
	  break;

	case CPP_OPEN_PAREN:
	  /* postfix-expression ( expression-list [opt] ) */
	  {
	    bool koenig_p;
	    bool is_builtin_constant_p;
	    bool saved_integral_constant_expression_p = false;
	    bool saved_non_integral_constant_expression_p = false;
	    VEC(tree,gc) *args;

	    is_member_access = false;

	    is_builtin_constant_p
	      = DECL_IS_BUILTIN_CONSTANT_P (postfix_expression);
	    if (is_builtin_constant_p)
	      {
		/* The whole point of __builtin_constant_p is to allow
		   non-constant expressions to appear as arguments.  */
		saved_integral_constant_expression_p
		  = parser->integral_constant_expression_p;
		saved_non_integral_constant_expression_p
		  = parser->non_integral_constant_expression_p;
		parser->integral_constant_expression_p = false;
	      }
	    args = (cp_parser_parenthesized_expression_list
		    (parser, non_attr,
		     /*cast_p=*/false, /*allow_expansion_p=*/true,
		     /*non_constant_p=*/NULL));
	    if (is_builtin_constant_p)
	      {
		parser->integral_constant_expression_p
		  = saved_integral_constant_expression_p;
		parser->non_integral_constant_expression_p
		  = saved_non_integral_constant_expression_p;
	      }

	    if (args == NULL)
	      {
		postfix_expression = error_mark_node;
		break;
	      }

	    /* Function calls are not permitted in
	       constant-expressions.  */
	    if (! builtin_valid_in_constant_expr_p (postfix_expression)
		&& cp_parser_non_integral_constant_expression (parser,
							       NIC_FUNC_CALL))
	      {
		postfix_expression = error_mark_node;
		release_tree_vector (args);
		break;
	      }

	    /* KOENIG_P records whether argument-dependent lookup
	       should participate in resolving the callee.  */
	    koenig_p = false;
	    if (idk == CP_ID_KIND_UNQUALIFIED
		|| idk == CP_ID_KIND_TEMPLATE_ID)
	      {
		if (TREE_CODE (postfix_expression) == IDENTIFIER_NODE)
		  {
		    if (!VEC_empty (tree, args))
		      {
			koenig_p = true;
			if (!any_type_dependent_arguments_p (args))
			  postfix_expression
			    = perform_koenig_lookup (postfix_expression, args,
						     /*include_std=*/false,
						     tf_warning_or_error);
		      }
		    else
		      postfix_expression
			= unqualified_fn_lookup_error (postfix_expression);
		  }
		/* We do not perform argument-dependent lookup if
		   normal lookup finds a non-function, in accordance
		   with the expected resolution of DR 218.  */
		else if (!VEC_empty (tree, args)
			 && is_overloaded_fn (postfix_expression))
		  {
		    tree fn = get_first_fn (postfix_expression);
		    fn = STRIP_TEMPLATE (fn);

		    /* Do not do argument dependent lookup if regular
		       lookup finds a member function or a block-scope
		       function declaration.  [basic.lookup.argdep]/3  */
		    if (!DECL_FUNCTION_MEMBER_P (fn)
			&& !DECL_LOCAL_FUNCTION_P (fn))
		      {
			koenig_p = true;
			if (!any_type_dependent_arguments_p (args))
			  postfix_expression
			    = perform_koenig_lookup (postfix_expression, args,
						     /*include_std=*/false,
						     tf_warning_or_error);
		      }
		  }
	      }

	    if (TREE_CODE (postfix_expression) == COMPONENT_REF)
	      {
		tree instance = TREE_OPERAND (postfix_expression, 0);
		tree fn = TREE_OPERAND (postfix_expression, 1);

		/* In a template, defer building the call if anything
		   about it is type-dependent.  */
		if (processing_template_decl
		    && (type_dependent_expression_p (instance)
			|| (!BASELINK_P (fn)
			    && TREE_CODE (fn) != FIELD_DECL)
			|| type_dependent_expression_p (fn)
			|| any_type_dependent_arguments_p (args)))
		  {
		    postfix_expression
		      = build_nt_call_vec (postfix_expression, args);
		    release_tree_vector (args);
		    break;
		  }

		if (BASELINK_P (fn))
		  {
		    postfix_expression
		      = (build_new_method_call
			 (instance, fn, &args, NULL_TREE,
			  (idk == CP_ID_KIND_QUALIFIED
			   ? LOOKUP_NORMAL|LOOKUP_NONVIRTUAL
			   : LOOKUP_NORMAL),
			  /*fn_p=*/NULL,
			  tf_warning_or_error));
		  }
		else
		  postfix_expression
		    = finish_call_expr (postfix_expression, &args,
					/*disallow_virtual=*/false,
					/*koenig_p=*/false,
					tf_warning_or_error);
	      }
	    else if (TREE_CODE (postfix_expression) == OFFSET_REF
		     || TREE_CODE (postfix_expression) == MEMBER_REF
		     || TREE_CODE (postfix_expression) == DOTSTAR_EXPR)
	      postfix_expression = (build_offset_ref_call_from_tree
				    (postfix_expression, &args));
	    else if (idk == CP_ID_KIND_QUALIFIED)
	      /* A call to a static class member, or a namespace-scope
		 function.  */
	      postfix_expression
		= finish_call_expr (postfix_expression, &args,
				    /*disallow_virtual=*/true,
				    koenig_p,
				    tf_warning_or_error);
	    else
	      /* All other function calls.  */
	      postfix_expression
		= finish_call_expr (postfix_expression, &args,
				    /*disallow_virtual=*/false,
				    koenig_p,
				    tf_warning_or_error);

	    /* The POSTFIX_EXPRESSION is certainly no longer an id.  */
	    idk = CP_ID_KIND_NONE;

	    release_tree_vector (args);
	  }
	  break;

	case CPP_DOT:
	case CPP_DEREF:
	  /* postfix-expression . template [opt] id-expression
	     postfix-expression . pseudo-destructor-name
	     postfix-expression -> template [opt] id-expression
	     postfix-expression -> pseudo-destructor-name */

	  /* Consume the `.' or `->' operator.  */
	  cp_lexer_consume_token (parser->lexer);

	  postfix_expression
	    = cp_parser_postfix_dot_deref_expression (parser, token->type,
						      postfix_expression,
						      false, &idk,
						      token->location);

	  is_member_access = true;
	  break;

	case CPP_PLUS_PLUS:
	  /* postfix-expression ++  */
	  /* Consume the `++' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Generate a representation for the complete expression.  */
	  postfix_expression
	    = finish_increment_expr (postfix_expression,
				     POSTINCREMENT_EXPR);
	  /* Increments may not appear in constant-expressions.  */
	  if (cp_parser_non_integral_constant_expression (parser, NIC_INC))
	    postfix_expression = error_mark_node;
	  idk = CP_ID_KIND_NONE;
	  is_member_access = false;
	  break;

	case CPP_MINUS_MINUS:
	  /* postfix-expression -- */
	  /* Consume the `--' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Generate a representation for the complete expression.  */
	  postfix_expression
	    = finish_increment_expr (postfix_expression,
				     POSTDECREMENT_EXPR);
	  /* Decrements may not appear in constant-expressions.  */
	  if (cp_parser_non_integral_constant_expression (parser, NIC_DEC))
	    postfix_expression = error_mark_node;
	  idk = CP_ID_KIND_NONE;
	  is_member_access = false;
	  break;

	default:
	  /* No more postfix suffixes follow; the expression is
	     complete, so return it.  */
	  if (pidk_return != NULL)
	    * pidk_return = idk;
	  if (member_access_only_p)
	    return is_member_access? postfix_expression : error_mark_node;
	  else
	    return postfix_expression;
	}
    }

  /* We should never get here.  */
  gcc_unreachable ();
  return error_mark_node;
}
/* A subroutine of cp_parser_postfix_expression that also gets hijacked
by cp_parser_builtin_offsetof. We're looking for
postfix-expression [ expression ]
postfix-expression [ braced-init-list ] (C++11)
FOR_OFFSETOF is set if we're being called in that context, which
changes how we deal with integer constant expressions. */
static tree
cp_parser_postfix_open_square_expression (cp_parser *parser,
					  tree postfix_expression,
					  bool for_offsetof)
{
  tree subscript;

  /* Eat the `[' that brought us here.  */
  cp_lexer_consume_token (parser->lexer);

  /* Parse the subscript expression.  */
  /* ??? For offsetof, there is a question of what to allow here.  If
     offsetof is not being used in an integral constant expression context,
     then we *could* get the right answer by computing the value at runtime.
     If we are in an integral constant expression context, then we might
     could accept any constant expression; hard to say without analysis.
     Rather than open the barn door too wide right away, allow only integer
     constant expressions here.  */
  if (for_offsetof)
    subscript = cp_parser_constant_expression (parser, false, NULL);
  else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* A braced-init-list subscript is a C++11 feature; warn when
	 it is used in an earlier dialect.  */
      bool subscript_nonconst_p;
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      subscript = cp_parser_braced_list (parser, &subscript_nonconst_p);
    }
  else
    subscript = cp_parser_expression (parser, /*cast_p=*/false, NULL);

  /* The subscript must be followed by a closing `]'.  */
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

  /* Build the ARRAY_REF.  */
  postfix_expression = grok_array_decl (postfix_expression, subscript);

  /* When not doing offsetof, array references are not permitted in
     constant-expressions.  */
  if (!for_offsetof
      && cp_parser_non_integral_constant_expression (parser, NIC_ARRAY_REF))
    postfix_expression = error_mark_node;

  return postfix_expression;
}
/* A subroutine of cp_parser_postfix_expression that also gets hijacked
by cp_parser_builtin_offsetof. We're looking for
postfix-expression . template [opt] id-expression
postfix-expression . pseudo-destructor-name
postfix-expression -> template [opt] id-expression
postfix-expression -> pseudo-destructor-name
FOR_OFFSETOF is set if we're being called in that context. That sorta
limits what of the above we'll actually accept, but nevermind.
TOKEN_TYPE is the "." or "->" token, which will already have been
removed from the stream. */
static tree
cp_parser_postfix_dot_deref_expression (cp_parser *parser,
enum cpp_ttype token_type,
tree postfix_expression,
bool for_offsetof, cp_id_kind *idk,
location_t location)
{
tree name;
bool dependent_p;
bool pseudo_destructor_p;
tree scope = NULL_TREE;
/* If this is a `->' operator, dereference the pointer. */
if (token_type == CPP_DEREF)
postfix_expression = build_x_arrow (postfix_expression);
/* Check to see whether or not the expression is type-dependent. */
dependent_p = type_dependent_expression_p (postfix_expression);
/* The identifier following the `->' or `.' is not qualified. */
parser->scope = NULL_TREE;
parser->qualifying_scope = NULL_TREE;
parser->object_scope = NULL_TREE;
*idk = CP_ID_KIND_NONE;
/* Enter the scope corresponding to the type of the object
given by the POSTFIX_EXPRESSION. */
if (!dependent_p && TREE_TYPE (postfix_expression) != NULL_TREE)
{
scope = TREE_TYPE (postfix_expression);
/* According to the standard, no expression should ever have
reference type. Unfortunately, we do not currently match
the standard in this respect in that our internal representation
of an expression may have reference type even when the standard
says it does not. Therefore, we have to manually obtain the
underlying type here. */
scope = non_reference (scope);
/* The type of the POSTFIX_EXPRESSION must be complete. */
if (scope == unknown_type_node)
{
error_at (location, "%qE does not have class type",
postfix_expression);
scope = NULL_TREE;
}
/* Unlike the object expression in other contexts, *this is not
required to be of complete type for purposes of class member
access (5.2.5) outside the member function body. */
else if (scope != current_class_ref
&& !(processing_template_decl && scope == current_class_type))
scope = complete_type_or_else (scope, NULL_TREE);
/* Let the name lookup machinery know that we are processing a
class member access expression. */
parser->context->object_type = scope;
/* If something went wrong, we want to be able to discern that case,
as opposed to the case where there was no SCOPE due to the type
of expression being dependent. */
if (!scope)
scope = error_mark_node;
/* If the SCOPE was erroneous, make the various semantic analysis
functions exit quickly -- and without issuing additional error
messages. */
if (scope == error_mark_node)
postfix_expression = error_mark_node;
}
/* Assume this expression is not a pseudo-destructor access. */
pseudo_destructor_p = false;
/* If the SCOPE is a scalar type, then, if this is a valid program,
we must be looking at a pseudo-destructor-name. If POSTFIX_EXPRESSION
is type dependent, it can be pseudo-destructor-name or something else.
Try to parse it as pseudo-destructor-name first. */
if ((scope && SCALAR_TYPE_P (scope)) || dependent_p)
{
tree s;
tree type;
cp_parser_parse_tentatively (parser);
/* Parse the pseudo-destructor-name. */
s = NULL_TREE;
cp_parser_pseudo_destructor_name (parser, &s, &type);
if (dependent_p
&& (cp_parser_error_occurred (parser)
|| TREE_CODE (type) != TYPE_DECL
|| !SCALAR_TYPE_P (TREE_TYPE (type))))
cp_parser_abort_tentative_parse (parser);
else if (cp_parser_parse_definitely (parser))
{
pseudo_destructor_p = true;
postfix_expression
= finish_pseudo_destructor_expr (postfix_expression,
s, TREE_TYPE (type));
}
}
if (!pseudo_destructor_p)
{
/* If the SCOPE is not a scalar type, we are looking at an
ordinary class member access expression, rather than a
pseudo-destructor-name. */
bool template_p;
cp_token *token = cp_lexer_peek_token (parser->lexer);
/* Parse the id-expression. */
name = (cp_parser_id_expression
(parser,
cp_parser_optional_template_keyword (parser),
/*check_dependency_p=*/true,
&template_p,
/*declarator_p=*/false,
/*optional_p=*/false));
/* In general, build a SCOPE_REF if the member name is qualified.
However, if the name was not dependent and has already been
resolved; there is no need to build the SCOPE_REF. For example;
struct X { void f(); };
template <typename T> void f(T* t) { t->X::f(); }
Even though "t" is dependent, "X::f" is not and has been resolved
to a BASELINK; there is no need to include scope information. */
/* But we do need to remember that there was an explicit scope for
virtual function calls. */
if (parser->scope)
*idk = CP_ID_KIND_QUALIFIED;
/* If the name is a template-id that names a type, we will get a
TYPE_DECL here. That is invalid code. */
if (TREE_CODE (name) == TYPE_DECL)
{
error_at (token->location, "invalid use of %qD", name);
postfix_expression = error_mark_node;
}
else
{
if (name != error_mark_node && !BASELINK_P (name) && parser->scope)
{
if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
{
error_at (token->location, "%<%D::%D%> is not a class member",
parser->scope, name);
postfix_expression = error_mark_node;
}
else
name = build_qualified_name (/*type=*/NULL_TREE,
parser->scope,
name,
template_p);
parser->scope = NULL_TREE;
parser->qualifying_scope = NULL_TREE;
parser->object_scope = NULL_TREE;
}
if (parser->scope && name && BASELINK_P (name))
adjust_result_of_qualified_name_lookup
(name, parser->scope, scope);
postfix_expression
= finish_class_member_access_expr (postfix_expression, name,
template_p,
tf_warning_or_error);
}
}
/* We no longer need to look up names in the scope of the object on
the left-hand side of the `.' or `->' operator. */
parser->context->object_type = NULL_TREE;
/* Outside of offsetof, these operators may not appear in
constant-expressions. */
if (!for_offsetof
&& (cp_parser_non_integral_constant_expression
(parser, token_type == CPP_DEREF ? NIC_ARROW : NIC_POINT)))
postfix_expression = error_mark_node;
return postfix_expression;
}
/* Parse a parenthesized expression-list.
expression-list:
assignment-expression
expression-list, assignment-expression
attribute-list:
expression-list
identifier
identifier, expression-list
CAST_P is true if this expression is the target of a cast.
ALLOW_EXPANSION_P is true if this expression allows expansion of an
argument pack.
Returns a vector of trees. Each element is a representation of an
assignment-expression. NULL is returned if the ( and or ) are
missing. An empty, but allocated, vector is returned on no
expressions. The parentheses are eaten. IS_ATTRIBUTE_LIST is id_attr
if we are parsing an attribute list for an attribute that wants a
plain identifier argument, normal_attr for an attribute that wants
an expression, or non_attr if we aren't parsing an attribute list. If
NON_CONSTANT_P is non-NULL, *NON_CONSTANT_P indicates whether or
not all of the expressions in the list were constant. */
static VEC(tree,gc) *
cp_parser_parenthesized_expression_list (cp_parser* parser,
					 int is_attribute_list,
					 bool cast_p,
					 bool allow_expansion_p,
					 bool *non_constant_p)
{
  VEC(tree,gc) *expression_list;
  /* Attribute arguments are folded as they are parsed; ordinary
     expression lists are left alone.  */
  bool fold_expr_p = is_attribute_list != non_attr;
  tree identifier = NULL_TREE;
  bool saved_greater_than_is_operator_p;

  /* Assume all the expressions will be constant.  */
  if (non_constant_p)
    *non_constant_p = false;

  /* Without an opening `(' there is no list at all; NULL tells the
     caller that parsing failed.  */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return NULL;

  expression_list = make_tree_vector ();

  /* Within a parenthesized expression, a `>' token is always
     the greater-than operator.  */
  saved_greater_than_is_operator_p
    = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = true;

  /* Consume expressions until there are no more.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
    while (true)
      {
	tree expr;

	/* At the beginning of attribute lists, check to see if the
	   next token is an identifier.  */
	if (is_attribute_list == id_attr
	    && cp_lexer_peek_token (parser->lexer)->type == CPP_NAME)
	  {
	    cp_token *token;

	    /* Consume the identifier.  */
	    token = cp_lexer_consume_token (parser->lexer);
	    /* Save the identifier.  */
	    identifier = token->u.value;
	  }
	else
	  {
	    bool expr_non_constant_p;

	    /* Parse the next assignment-expression.  */
	    if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	      {
		/* A braced-init-list.  */
		maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
		expr = cp_parser_braced_list (parser, &expr_non_constant_p);
		if (non_constant_p && expr_non_constant_p)
		  *non_constant_p = true;
	      }
	    else if (non_constant_p)
	      {
		/* The caller cares whether the arguments are constant;
		   parse permissively but record any non-constant.  */
		expr = (cp_parser_constant_expression
			(parser, /*allow_non_constant_p=*/true,
			 &expr_non_constant_p));
		if (expr_non_constant_p)
		  *non_constant_p = true;
	      }
	    else
	      expr = cp_parser_assignment_expression (parser, cast_p, NULL);

	    if (fold_expr_p)
	      expr = fold_non_dependent_expr (expr);

	    /* If we have an ellipsis, then this is an expression
	       expansion.  */
	    if (allow_expansion_p
		&& cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	      {
		/* Consume the `...'.  */
		cp_lexer_consume_token (parser->lexer);

		/* Build the argument pack.  */
		expr = make_pack_expansion (expr);
	      }

	    /* Add it to the list.  We add error_mark_node
	       expressions to the list, so that we can still tell if
	       the correct form for a parenthesized expression-list
	       is found.  That gives better errors.  */
	    VEC_safe_push (tree, gc, expression_list, expr);

	    /* On an erroneous expression, resynchronize at an unnested
	       `,' or the closing `)' (see skip_comma below).  */
	    if (expr == error_mark_node)
	      goto skip_comma;
	  }

	/* After the first item, attribute lists look the same as
	   expression lists.  */
	is_attribute_list = non_attr;

      get_comma:;
	/* If the next token isn't a `,', then we are done.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  break;

	/* Otherwise, consume the `,' and keep going.  */
	cp_lexer_consume_token (parser->lexer);
      }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      int ending;

    skip_comma:;
      /* We try and resync to an unnested comma, as that will give the
	 user better diagnostics.  */
      ending = cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/true,
						      /*consume_paren=*/true);
      /* A negative result means we stopped at a comma; resume the
	 loop at get_comma above.  Zero means the `)' was never found;
	 restore the `>' handling and report failure.  */
      if (ending < 0)
	goto get_comma;
      if (!ending)
	{
	  parser->greater_than_is_operator_p
	    = saved_greater_than_is_operator_p;
	  return NULL;
	}
    }

  parser->greater_than_is_operator_p
    = saved_greater_than_is_operator_p;

  /* A leading attribute identifier, if one was seen, becomes the
     first element of the returned vector.  */
  if (identifier)
    VEC_safe_insert (tree, gc, expression_list, 0, identifier);

  return expression_list;
}
/* Parse a pseudo-destructor-name.
pseudo-destructor-name:
:: [opt] nested-name-specifier [opt] type-name :: ~ type-name
:: [opt] nested-name-specifier template template-id :: ~ type-name
:: [opt] nested-name-specifier [opt] ~ type-name
If either of the first two productions is used, sets *SCOPE to the
TYPE specified before the final `::'. Otherwise, *SCOPE is set to
NULL_TREE. *TYPE is set to the TYPE_DECL for the final type-name,
or ERROR_MARK_NODE if the parse fails. */
static void
cp_parser_pseudo_destructor_name (cp_parser* parser,
				  tree* scope,
				  tree* type)
{
  /* True if a nested-name-specifier preceded the `~' (first two
     grammar productions).  */
  bool nested_name_specifier_p;

  /* Assume that things will not work out.  */
  *type = error_mark_node;

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/true);
  /* Look for the optional nested-name-specifier.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    /*check_dependency_p=*/true,
					    /*type_p=*/false,
					    /*is_declaration=*/false)
       != NULL_TREE);
  /* Now, if we saw a nested-name-specifier, we might be doing the
     second production.  */
  if (nested_name_specifier_p
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* Consume the `template' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the template-id.  */
      cp_parser_template_id (parser,
			     /*template_keyword_p=*/true,
			     /*check_dependency_p=*/false,
			     /*is_declaration=*/true);
      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, RT_SCOPE);
    }
  /* If the next token is not a `~', then there might be some
     additional qualification.  */
  else if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMPL))
    {
      /* At this point, we're looking for "type-name :: ~".  The type-name
	 must not be a class-name, since this is a pseudo-destructor.  So,
	 it must be either an enum-name, or a typedef-name -- both of which
	 are just identifiers.  So, we peek ahead to check that the "::"
	 and "~" tokens are present; if they are not, then we can avoid
	 calling type_name.  */
      if (cp_lexer_peek_token (parser->lexer)->type != CPP_NAME
	  || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE
	  || cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_COMPL)
	{
	  cp_parser_error (parser, "non-scalar type");
	  return;
	}

      /* Look for the type-name.  */
      *scope = TREE_TYPE (cp_parser_nonclass_name (parser));
      if (*scope == error_mark_node)
	return;

      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, RT_SCOPE);
    }
  else
    *scope = NULL_TREE;

  /* Look for the `~'.  */
  cp_parser_require (parser, CPP_COMPL, RT_COMPL);

  /* Once we see the ~, this has to be a pseudo-destructor.  */
  if (!processing_template_decl && !cp_parser_error_occurred (parser))
    cp_parser_commit_to_tentative_parse (parser);

  /* Look for the type-name again.  We are not responsible for
     checking that it matches the first type-name.  */
  *type = cp_parser_nonclass_name (parser);
}
/* Parse a unary-expression.
unary-expression:
postfix-expression
++ cast-expression
-- cast-expression
unary-operator cast-expression
sizeof unary-expression
sizeof ( type-id )
alignof ( type-id ) [C++0x]
new-expression
delete-expression
GNU Extensions:
unary-expression:
__extension__ cast-expression
__alignof__ unary-expression
__alignof__ ( type-id )
alignof unary-expression [C++0x]
__real__ cast-expression
__imag__ cast-expression
&& identifier
ADDRESS_P is true iff the unary-expression is appearing as the
operand of the `&' operator. CAST_P is true if this expression is
the target of a cast.
Returns a representation of the expression. */
static tree
cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p,
			    cp_id_kind * pidk)
{
  cp_token *token;
  enum tree_code unary_operator;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Some keywords give away the kind of expression.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
	{
	case RID_ALIGNOF:
	case RID_SIZEOF:
	  {
	    tree operand;
	    enum tree_code op;

	    op = keyword == RID_ALIGNOF ? ALIGNOF_EXPR : SIZEOF_EXPR;
	    /* Consume the token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the operand.  May be either a type-id or an
	       expression.  */
	    operand = cp_parser_sizeof_operand (parser, keyword);

	    if (TYPE_P (operand))
	      return cxx_sizeof_or_alignof_type (operand, op, true);
	    else
	      {
		/* ISO C++ defines alignof only with types, not with
		   expressions. So pedwarn if alignof is used with a non-
		   type expression. However, __alignof__ is ok.  */
		if (!strcmp (IDENTIFIER_POINTER (token->u.value), "alignof"))
		  pedwarn (token->location, OPT_pedantic,
			   "ISO C++ does not allow %<alignof%> "
			   "with a non-type");

		return cxx_sizeof_or_alignof_expr (operand, op, true);
	      }
	  }

	case RID_NEW:
	  return cp_parser_new_expression (parser);

	case RID_DELETE:
	  return cp_parser_delete_expression (parser);

	case RID_EXTENSION:
	  {
	    /* The saved value of the PEDANTIC flag.  */
	    int saved_pedantic;
	    tree expr;

	    /* Save away the PEDANTIC flag.  __extension__ suppresses
	       pedantic diagnostics for its operand.  */
	    cp_parser_extension_opt (parser, &saved_pedantic);
	    /* Parse the cast-expression.  */
	    expr = cp_parser_simple_cast_expression (parser);
	    /* Restore the PEDANTIC flag.  */
	    pedantic = saved_pedantic;

	    return expr;
	  }

	case RID_REALPART:
	case RID_IMAGPART:
	  {
	    tree expression;

	    /* Consume the `__real__' or `__imag__' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the cast-expression.  */
	    expression = cp_parser_simple_cast_expression (parser);
	    /* Create the complete representation.  */
	    return build_x_unary_op ((keyword == RID_REALPART
				      ? REALPART_EXPR : IMAGPART_EXPR),
				     expression,
				     tf_warning_or_error);
	  }
	  break;

	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  return cp_parser_transaction_expression (parser, keyword);

	case RID_NOEXCEPT:
	  {
	    tree expr;
	    const char *saved_message;
	    bool saved_integral_constant_expression_p;
	    bool saved_non_integral_constant_expression_p;
	    bool saved_greater_than_is_operator_p;

	    cp_lexer_consume_token (parser->lexer);
	    cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

	    /* Types may not be defined inside the noexcept operand,
	       and the operand is not a constant-expression context,
	       so save and override the relevant parser state.  */
	    saved_message = parser->type_definition_forbidden_message;
	    parser->type_definition_forbidden_message
	      = G_("types may not be defined in %<noexcept%> expressions");

	    saved_integral_constant_expression_p
	      = parser->integral_constant_expression_p;
	    saved_non_integral_constant_expression_p
	      = parser->non_integral_constant_expression_p;
	    parser->integral_constant_expression_p = false;

	    saved_greater_than_is_operator_p
	      = parser->greater_than_is_operator_p;
	    parser->greater_than_is_operator_p = true;

	    /* The operand of noexcept is an unevaluated operand;
	       suppress evaluation warnings while parsing it.  */
	    ++cp_unevaluated_operand;
	    ++c_inhibit_evaluation_warnings;
	    expr = cp_parser_expression (parser, false, NULL);
	    --c_inhibit_evaluation_warnings;
	    --cp_unevaluated_operand;

	    parser->greater_than_is_operator_p
	      = saved_greater_than_is_operator_p;

	    parser->integral_constant_expression_p
	      = saved_integral_constant_expression_p;
	    parser->non_integral_constant_expression_p
	      = saved_non_integral_constant_expression_p;

	    parser->type_definition_forbidden_message = saved_message;

	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	    return finish_noexcept_expr (expr, tf_warning_or_error);
	  }

	default:
	  break;
	}
    }

  /* Look for the `:: new' and `:: delete', which also signal the
     beginning of a new-expression, or delete-expression,
     respectively.  If the next token is `::', then it might be one of
     these.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    {
      enum rid keyword;

      /* See if the token after the `::' is one of the keywords in
	 which we're interested.  */
      keyword = cp_lexer_peek_nth_token (parser->lexer, 2)->keyword;
      /* If it's `new', we have a new-expression.  */
      if (keyword == RID_NEW)
	return cp_parser_new_expression (parser);
      /* Similarly, for `delete'.  */
      else if (keyword == RID_DELETE)
	return cp_parser_delete_expression (parser);
    }

  /* Look for a unary operator.  */
  unary_operator = cp_parser_unary_operator (token);
  /* The `++' and `--' operators can be handled similarly, even though
     they are not technically unary-operators in the grammar.  */
  if (unary_operator == ERROR_MARK)
    {
      if (token->type == CPP_PLUS_PLUS)
	unary_operator = PREINCREMENT_EXPR;
      else if (token->type == CPP_MINUS_MINUS)
	unary_operator = PREDECREMENT_EXPR;
      /* Handle the GNU address-of-label extension.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && token->type == CPP_AND_AND)
	{
	  tree identifier;
	  tree expression;
	  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

	  /* Consume the '&&' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Look for the identifier.  */
	  identifier = cp_parser_identifier (parser);
	  /* Create an expression representing the address.  */
	  expression = finish_label_address_expr (identifier, loc);
	  if (cp_parser_non_integral_constant_expression (parser,
							  NIC_ADDR_LABEL))
	    expression = error_mark_node;
	  return expression;
	}
    }
  if (unary_operator != ERROR_MARK)
    {
      tree cast_expression;
      tree expression = error_mark_node;
      non_integral_constant non_constant_p = NIC_NONE;

      /* Consume the operator token.  */
      token = cp_lexer_consume_token (parser->lexer);
      /* Parse the cast-expression.  */
      cast_expression
	= cp_parser_cast_expression (parser,
				     unary_operator == ADDR_EXPR,
				     /*cast_p=*/false, pidk);
      /* Now, build an appropriate representation.  */
      switch (unary_operator)
	{
	case INDIRECT_REF:
	  non_constant_p = NIC_STAR;
	  expression = build_x_indirect_ref (cast_expression, RO_UNARY_STAR,
                                             tf_warning_or_error);
	  break;

	case ADDR_EXPR:
	   non_constant_p = NIC_ADDR;
	  /* Fall through.  */
	case BIT_NOT_EXPR:
	  expression = build_x_unary_op (unary_operator, cast_expression,
                                         tf_warning_or_error);
	  break;

	case PREINCREMENT_EXPR:
	case PREDECREMENT_EXPR:
	  non_constant_p = unary_operator == PREINCREMENT_EXPR
			   ? NIC_PREINCREMENT : NIC_PREDECREMENT;
	  /* Fall through.  */
	case UNARY_PLUS_EXPR:
	case NEGATE_EXPR:
	case TRUTH_NOT_EXPR:
	  expression = finish_unary_op_expr (unary_operator, cast_expression);
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Operators like `*' and `&' are not permitted in integral
	 constant expressions; record that if necessary.  */
      if (non_constant_p != NIC_NONE
	  && cp_parser_non_integral_constant_expression (parser,
							 non_constant_p))
	expression = error_mark_node;

      return expression;
    }

  return cp_parser_postfix_expression (parser, address_p, cast_p,
				       /*member_access_only_p=*/false,
				       pidk);
}
/* Returns ERROR_MARK if TOKEN is not a unary-operator. If TOKEN is a
unary-operator, the corresponding tree code is returned. */
/* Map TOKEN onto the tree code of the unary-operator it spells, or
   ERROR_MARK if TOKEN does not spell a unary-operator.  */

static enum tree_code
cp_parser_unary_operator (cp_token* token)
{
  enum tree_code code = ERROR_MARK;

  switch (token->type)
    {
    case CPP_MULT:			/* `*' -- dereference.  */
      code = INDIRECT_REF;
      break;
    case CPP_AND:			/* `&' -- address-of.  */
      code = ADDR_EXPR;
      break;
    case CPP_PLUS:			/* unary `+'.  */
      code = UNARY_PLUS_EXPR;
      break;
    case CPP_MINUS:			/* unary `-'.  */
      code = NEGATE_EXPR;
      break;
    case CPP_NOT:			/* `!' -- logical not.  */
      code = TRUTH_NOT_EXPR;
      break;
    case CPP_COMPL:			/* `~' -- bitwise complement.  */
      code = BIT_NOT_EXPR;
      break;
    default:
      break;
    }

  return code;
}
/* Parse a new-expression.
new-expression:
:: [opt] new new-placement [opt] new-type-id new-initializer [opt]
:: [opt] new new-placement [opt] ( type-id ) new-initializer [opt]
Returns a representation of the expression. */
static tree
cp_parser_new_expression (cp_parser* parser)
{
  bool global_scope_p;
  VEC(tree,gc) *placement;
  tree type;
  VEC(tree,gc) *initializer;
  tree nelts;
  tree ret;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the `new' operator.  */
  cp_parser_require_keyword (parser, RID_NEW, RT_NEW);
  /* There's no easy way to tell a new-placement from the
     `( type-id )' construct.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a new-placement.  */
  placement = cp_parser_new_placement (parser);
  /* If that didn't work out, there's no new-placement.  Release the
     vector the failed attempt may have allocated.  */
  if (!cp_parser_parse_definitely (parser))
    {
      if (placement != NULL)
	release_tree_vector (placement);
      placement = NULL;
    }

  /* If the next token is a `(', then we have a parenthesized
     type-id.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_token *token;
      const char *saved_message = parser->type_definition_forbidden_message;

      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);

      /* Parse the type-id.  */
      parser->type_definition_forbidden_message
	= G_("types may not be defined in a new-expression");
      type = cp_parser_type_id (parser);
      parser->type_definition_forbidden_message = saved_message;

      /* Look for the closing `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      token = cp_lexer_peek_token (parser->lexer);
      /* There should not be a direct-new-declarator in this production,
	 but GCC used to allow this, so we check and emit a sensible error
	 message for this case.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  error_at (token->location,
		    "array bound forbidden after parenthesized type-id");
	  inform (token->location, 
		  "try removing the parentheses around the type-id");
	  cp_parser_direct_new_declarator (parser);
	}
      nelts = NULL_TREE;
    }
  /* Otherwise, there must be a new-type-id.  */
  else
    type = cp_parser_new_type_id (parser, &nelts);

  /* If the next token is a `(' or '{', then we have a new-initializer.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)
      || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    initializer = cp_parser_new_initializer (parser);
  else
    initializer = NULL;

  /* A new-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, NIC_NEW))
    ret = error_mark_node;
  else
    {
      /* Create a representation of the new-expression.  */
      ret = build_new (&placement, type, nelts, &initializer, global_scope_p,
		       tf_warning_or_error);
    }

  /* The argument vectors are no longer needed once build_new has
     consumed them; return them to the reuse pool.  */
  if (placement != NULL)
    release_tree_vector (placement);
  if (initializer != NULL)
    release_tree_vector (initializer);

  return ret;
}
/* Parse a new-placement.
new-placement:
( expression-list )
Returns the same representation as for an expression-list. */
/* A new-placement is nothing but a parenthesized expression-list, so
   delegate directly to the general expression-list parser.  Pack
   expansions are permitted; constancy of the arguments is not
   tracked.  */

static VEC(tree,gc) *
cp_parser_new_placement (cp_parser* parser)
{
  return cp_parser_parenthesized_expression_list (parser, non_attr,
						  /*cast_p=*/false,
						  /*allow_expansion_p=*/true,
						  /*non_constant_p=*/NULL);
}
/* Parse a new-type-id.
new-type-id:
type-specifier-seq new-declarator [opt]
Returns the TYPE allocated. If the new-type-id indicates an array
type, *NELTS is set to the number of elements in the last array
bound; the TYPE will not include the last array bound. */
static tree
cp_parser_new_type_id (cp_parser* parser, tree *nelts)
{
  cp_decl_specifier_seq type_specifier_seq;
  cp_declarator *new_declarator;
  cp_declarator *declarator;
  cp_declarator *outer_declarator;
  const char *saved_message;
  tree type;

  /* The type-specifier sequence must not contain type definitions.
     (It cannot contain declarations of new types either, but if they
     are not definitions we will catch that because they are not
     complete.)  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in a new-type-id");
  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				/*is_trailing_return=*/false,
				&type_specifier_seq);
  /* Restore the old message.  */
  parser->type_definition_forbidden_message = saved_message;
  /* Parse the new-declarator.  */
  new_declarator = cp_parser_new_declarator_opt (parser);

  /* Determine the number of elements in the last array dimension, if
     any.  The last bound is stripped from the declarator chain and
     returned via *NELTS, per the documented contract above.  */
  *nelts = NULL_TREE;
  /* Skip down to the last array dimension.  First step over any
     pointer/pointer-to-member declarators ...  */
  declarator = new_declarator;
  outer_declarator = NULL;
  while (declarator && (declarator->kind == cdk_pointer
			|| declarator->kind == cdk_ptrmem))
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }
  /* ... then walk to the innermost array declarator, keeping track of
     its parent so the chain can be re-linked below.  */
  while (declarator
	 && declarator->kind == cdk_array
	 && declarator->declarator
	 && declarator->declarator->kind == cdk_array)
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }

  if (declarator && declarator->kind == cdk_array)
    {
      *nelts = declarator->u.array.bounds;
      /* An erroneous bound is replaced by 1 so that error recovery
	 can continue with a valid array type.  */
      if (*nelts == error_mark_node)
	*nelts = integer_one_node;

      /* Unlink the last array declarator: its bound is now carried
	 separately in *NELTS.  */
      if (outer_declarator)
	outer_declarator->declarator = declarator->declarator;
      else
	new_declarator = NULL;
    }

  type = groktypename (&type_specifier_seq, new_declarator, false);
  return type;
}
/* Parse an (optional) new-declarator.
new-declarator:
ptr-operator new-declarator [opt]
direct-new-declarator
Returns the declarator. */
static cp_declarator *
cp_parser_new_declarator_opt (cp_parser* parser)
{
  enum tree_code code;
  tree type;
  cp_cv_quals cv_quals;

  /* We don't know if there's a ptr-operator next, or not.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a ptr-operator.  */
  code = cp_parser_ptr_operator (parser, &type, &cv_quals);
  /* If that worked, look for more new-declarators.  */
  if (cp_parser_parse_definitely (parser))
    {
      cp_declarator *declarator;

      /* Parse another optional declarator.  The grammar is
	 right-recursive: ptr-operator new-declarator [opt].  */
      declarator = cp_parser_new_declarator_opt (parser);

      /* Wrap the inner declarator in the pointer/reference/ptrmem
	 declarator just parsed.  */
      return cp_parser_make_indirect_declarator
	(code, type, cv_quals, declarator);
    }

  /* If the next token is a `[', there is a direct-new-declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    return cp_parser_direct_new_declarator (parser);

  /* Otherwise the new-declarator is absent; that is fine, it is
     optional.  */
  return NULL;
}
/* Parse a direct-new-declarator.
direct-new-declarator:
[ expression ]
direct-new-declarator [constant-expression]
*/
static cp_declarator *
cp_parser_direct_new_declarator (cp_parser* parser)
{
  cp_declarator *declarator = NULL;

  /* Parse one `[ expression ]' bound per iteration, accumulating
     array declarators around DECLARATOR.  */
  while (true)
    {
      tree expression;

      /* Look for the opening `['.  */
      cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE);

      /* The first expression is not required to be constant.  */
      if (!declarator)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  expression = cp_parser_expression (parser, /*cast_p=*/false, NULL);
	  /* The standard requires that the expression have integral
	     type.  DR 74 adds enumeration types.  We believe that the
	     real intent is that these expressions be handled like the
	     expression in a `switch' condition, which also allows
	     classes with a single conversion to integral or
	     enumeration type.  */
	  if (!processing_template_decl)
	    {
	      expression
		= build_expr_type_conversion (WANT_INT | WANT_ENUM,
					      expression,
					      /*complain=*/true);
	      if (!expression)
		{
		  error_at (token->location,
			    "expression in new-declarator must have integral "
			    "or enumeration type");
		  expression = error_mark_node;
		}
	    }
	}
      /* But all the other expressions must be.  */
      else
	expression
	  = cp_parser_constant_expression (parser,
					   /*allow_non_constant=*/false,
					   NULL);

      /* Look for the closing `]'.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

      /* Add this bound to the declarator.  */
      declarator = make_array_declarator (declarator, expression);

      /* If the next token is not a `[', then there are no more
	 bounds.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE))
	break;
    }

  return declarator;
}
/* Parse a new-initializer.
new-initializer:
( expression-list [opt] )
braced-init-list
Returns a representation of the expression-list. */
/* Parse a new-initializer.

   new-initializer:
     ( expression-list [opt] )
     braced-init-list

   Returns a representation of the expression-list.  */

static VEC(tree,gc) *
cp_parser_new_initializer (cp_parser* parser)
{
  VEC(tree,gc) *args;

  if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE))
    /* The parenthesized form: hand off to the general
       expression-list parser.  */
    args = cp_parser_parenthesized_expression_list (parser, non_attr,
						    /*cast_p=*/false,
						    /*allow_expansion_p=*/true,
						    /*non_constant_p=*/NULL);
  else
    {
      /* A C++11 braced-init-list; warn in pre-0x dialects.  */
      bool braced_list_non_constant_p;
      tree init;

      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      init = cp_parser_braced_list (parser, &braced_list_non_constant_p);
      /* `new T{...}' is direct-initialization.  */
      CONSTRUCTOR_IS_DIRECT_INIT (init) = 1;
      args = make_tree_vector_single (init);
    }

  return args;
}
/* Parse a delete-expression.
delete-expression:
:: [opt] delete cast-expression
:: [opt] delete [ ] cast-expression
Returns a representation of the expression. */
/* Parse a delete-expression.

   delete-expression:
     :: [opt] delete cast-expression
     :: [opt] delete [ ] cast-expression

   Returns a representation of the expression.  */

static tree
cp_parser_delete_expression (cp_parser* parser)
{
  bool array_p = false;
  bool global_scope_p;
  tree operand;

  /* An optional leading `::' selects the global operator delete.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* The `delete' keyword itself.  */
  cp_parser_require_keyword (parser, RID_DELETE, RT_DELETE);

  /* `delete [] ...' is the array form; consume the brackets.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    {
      cp_lexer_consume_token (parser->lexer);
      cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
      array_p = true;
    }

  /* The operand is a cast-expression.  */
  operand = cp_parser_simple_cast_expression (parser);

  /* A delete-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, NIC_DEL))
    return error_mark_node;

  return delete_sanity (operand, NULL_TREE, array_p, global_scope_p,
			tf_warning_or_error);
}
/* Returns true if TOKEN may start a cast-expression and false
otherwise. */
/* Returns true if the token at the head of PARSER's lexer may start a
   cast-expression and false otherwise.  */

static bool
cp_parser_tokens_start_cast_expression (cp_parser *parser)
{
  cp_token *next = cp_lexer_peek_token (parser->lexer);

  switch (next->type)
    {
    case CPP_OPEN_PAREN:
      /* In `((type ()) ()' the last `()' isn't a valid
	 cast-expression, so the whole must be parsed as a
	 postfix-expression.  */
      return (cp_lexer_peek_nth_token (parser->lexer, 2)->type
	      != CPP_CLOSE_PAREN);

    case CPP_OPEN_SQUARE:
      /* `[' may start a primary-expression in obj-c++.  */
      return c_dialect_objc ();

    /* Closers, separators, and binary/assignment operators can never
       begin an expression, so a `(type)' followed by one of these is
       not a cast.  */
    case CPP_COMMA: case CPP_SEMICOLON: case CPP_QUERY: case CPP_COLON:
    case CPP_CLOSE_SQUARE: case CPP_CLOSE_PAREN: case CPP_CLOSE_BRACE:
    case CPP_DOT: case CPP_DOT_STAR:
    case CPP_DEREF: case CPP_DEREF_STAR:
    case CPP_DIV: case CPP_MOD:
    case CPP_LSHIFT: case CPP_RSHIFT:
    case CPP_LESS: case CPP_GREATER:
    case CPP_LESS_EQ: case CPP_GREATER_EQ:
    case CPP_EQ_EQ: case CPP_NOT_EQ:
    case CPP_EQ: case CPP_MULT_EQ: case CPP_DIV_EQ: case CPP_MOD_EQ:
    case CPP_PLUS_EQ: case CPP_MINUS_EQ:
    case CPP_RSHIFT_EQ: case CPP_LSHIFT_EQ:
    case CPP_AND_EQ: case CPP_XOR_EQ: case CPP_OR_EQ:
    case CPP_XOR: case CPP_OR: case CPP_OR_OR:
    case CPP_EOF:
      return false;

    default:
      /* Everything else (identifiers, literals, unary operators, ...)
	 can begin a cast-expression.  */
      return true;
    }
}
/* Parse a cast-expression.
cast-expression:
unary-expression
( type-id ) cast-expression
ADDRESS_P is true iff the unary-expression is appearing as the
operand of the `&' operator. CAST_P is true if this expression is
the target of a cast.
Returns a representation of the expression. */
static tree
cp_parser_cast_expression (cp_parser *parser, bool address_p, bool cast_p,
			   cp_id_kind * pidk)
{
  /* If it's a `(', then we might be looking at a cast.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type = NULL_TREE;
      tree expr = NULL_TREE;
      bool compound_literal_p;
      const char *saved_message;

      /* There's no way to know yet whether or not this is a cast.
	 For example, `(int (3))' is a unary-expression, while `(int)
	 3' is a cast.  So, we resort to parsing tentatively.  */
      cp_parser_parse_tentatively (parser);
      /* Types may not be defined in a cast.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= G_("types may not be defined in casts");
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* A very tricky bit is that `(struct S) { 3 }' is a
	 compound-literal (which we permit in C++ as an extension).
	 But, that construct is not a cast-expression -- it is a
	 postfix-expression.  (The reason is that `(struct S) { 3 }.i'
	 is legal; if the compound-literal were a cast-expression,
	 you'd need an extra set of parentheses.)  But, if we parse
	 the type-id, and it happens to be a class-specifier, then we
	 will commit to the parse at that point, because we cannot
	 undo the action that is done when creating a new class.  So,
	 then we cannot back up and do a postfix-expression.

	 Therefore, we scan ahead to the closing `)', and check to see
	 if the token after the `)' is a `{'.  If so, we are not
	 looking at a cast-expression.

	 Save tokens so that we can put them back.  */
      cp_lexer_save_tokens (parser->lexer);
      /* Skip tokens until the next token is a closing parenthesis.
	 If we find the closing `)', and the next token is a `{', then
	 we are looking at a compound-literal.  */
      compound_literal_p
	= (cp_parser_skip_to_closing_parenthesis (parser, false, false,
						  /*consume_paren=*/true)
	   && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE));
      /* Roll back the tokens we skipped.  */
      cp_lexer_rollback_tokens (parser->lexer);
      /* If we were looking at a compound-literal, simulate an error
	 so that the call to cp_parser_parse_definitely below will
	 fail.  */
      if (compound_literal_p)
	cp_parser_simulate_error (parser);
      else
	{
	  bool saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	  parser->in_type_id_in_expr_p = true;
	  /* Look for the type-id.  */
	  type = cp_parser_type_id (parser);
	  /* Look for the closing `)'.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	}

      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;

      /* At this point this can only be either a cast or a
	 parenthesized ctor such as `(T ())' that looks like a cast to
	 function returning T.  */
      if (!cp_parser_error_occurred (parser)
	  && cp_parser_tokens_start_cast_expression (parser))
	{
	  /* Commit to the cast interpretation and parse the operand.  */
	  cp_parser_parse_definitely (parser);
	  expr = cp_parser_cast_expression (parser,
					    /*address_p=*/false,
					    /*cast_p=*/true, pidk);

	  /* Warn about old-style casts, if so requested.  */
	  if (warn_old_style_cast
	      && !in_system_header
	      && !VOID_TYPE_P (type)
	      && current_lang_name != lang_name_c)
	    warning (OPT_Wold_style_cast, "use of old-style cast");

	  /* Only type conversions to integral or enumeration types
	     can be used in constant-expressions.  */
	  if (!cast_valid_in_integral_constant_expression_p (type)
	      && cp_parser_non_integral_constant_expression (parser,
							     NIC_CAST))
	    return error_mark_node;

	  /* Perform the cast.  */
	  expr = build_c_cast (input_location, type, expr);
	  return expr;
	}
      else 
        cp_parser_abort_tentative_parse (parser);
    }

  /* If we get here, then it's not a cast, so it must be a
     unary-expression.  */
  return cp_parser_unary_expression (parser, address_p, cast_p, pidk);
}
/* Parse a binary expression of the general form:
pm-expression:
cast-expression
pm-expression .* cast-expression
pm-expression ->* cast-expression
multiplicative-expression:
pm-expression
multiplicative-expression * pm-expression
multiplicative-expression / pm-expression
multiplicative-expression % pm-expression
additive-expression:
multiplicative-expression
additive-expression + multiplicative-expression
additive-expression - multiplicative-expression
shift-expression:
additive-expression
shift-expression << additive-expression
shift-expression >> additive-expression
relational-expression:
shift-expression
relational-expression < shift-expression
relational-expression > shift-expression
relational-expression <= shift-expression
relational-expression >= shift-expression
GNU Extension:
relational-expression:
relational-expression <? shift-expression
relational-expression >? shift-expression
equality-expression:
relational-expression
equality-expression == relational-expression
equality-expression != relational-expression
and-expression:
equality-expression
and-expression & equality-expression
exclusive-or-expression:
and-expression
exclusive-or-expression ^ and-expression
inclusive-or-expression:
exclusive-or-expression
inclusive-or-expression | exclusive-or-expression
logical-and-expression:
inclusive-or-expression
logical-and-expression && inclusive-or-expression
logical-or-expression:
logical-and-expression
logical-or-expression || logical-and-expression
All these are implemented with a single function like:
binary-expression:
simple-cast-expression
binary-expression <token> binary-expression
CAST_P is true if this expression is the target of a cast.
The binops_by_token map is used to get the tree codes for each <token> type.
binary-expressions are associated according to a precedence table. */
/* Compute the precedence of the operator TOKEN for the precedence table
   BINOPS_BY_TOKEN.  `>' -- and, from C++11 on, `>>' -- is not treated as
   an operator at all when parser->greater_than_is_operator_p is false
   (i.e. inside a template-argument-list), so it yields PREC_NOT_OPERATOR
   and terminates the binary expression.  */
#define TOKEN_PRECEDENCE(token)				     \
  (((token->type == CPP_GREATER				     \
     || ((cxx_dialect != cxx98) && token->type == CPP_RSHIFT)) \
    && !parser->greater_than_is_operator_p)		     \
   ? PREC_NOT_OPERATOR					     \
   : binops_by_token[token->type].prec)
/* Worker for the binary-expression grammar documented above: an
   operator-precedence ("precedence climbing") parser using an explicit
   stack of suspended subexpressions instead of one recursive call per
   precedence level.  PREC is the precedence floor; operators at or below
   it terminate the expression.  */
static tree
cp_parser_binary_expression (cp_parser* parser, bool cast_p,
			     bool no_toplevel_fold_p,
			     enum cp_parser_prec prec,
			     cp_id_kind * pidk)
{
  cp_parser_expression_stack stack;
  cp_parser_expression_stack_entry *sp = &stack[0];
  tree lhs, rhs;
  cp_token *token;
  enum tree_code tree_type, lhs_type, rhs_type;
  enum cp_parser_prec new_prec, lookahead_prec;
  tree overload;

  /* Parse the first expression.  */
  lhs = cp_parser_cast_expression (parser, /*address_p=*/false, cast_p, pidk);
  lhs_type = ERROR_MARK;
  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  for (;;)
    {
      /* Get an operator token.  */
      token = cp_lexer_peek_token (parser->lexer);

      if (warn_cxx0x_compat
	  && token->type == CPP_RSHIFT
	  && !parser->greater_than_is_operator_p)
	{
	  if (warning_at (token->location, OPT_Wc__0x_compat,
			  "%<>>%> operator is treated as"
			  " two right angle brackets in C++11"))
	    inform (token->location,
		    "suggest parentheses around %<>>%> expression");
	}

      new_prec = TOKEN_PRECEDENCE (token);

      /* Popping an entry off the stack means we completed a subexpression:
	 - either we found a token which is not an operator (`>' where it is not
	   an operator, or prec == PREC_NOT_OPERATOR), in which case popping
	   will happen repeatedly;
	 - or, we found an operator which has lower priority.  This is the case
	   where the recursive descent *ascends*, as in `3 * 4 + 5' after
	   parsing `3 * 4'.  */
      if (new_prec <= prec)
	{
	  if (sp == stack)
	    break;
	  else
	    goto pop;
	}

     get_rhs:
      tree_type = binops_by_token[token->type].tree_type;

      /* We used the operator token.  */
      cp_lexer_consume_token (parser->lexer);

      /* For "false && x" or "true || x", x will never be executed;
	 disable warnings while evaluating it.  */
      if (tree_type == TRUTH_ANDIF_EXPR)
	c_inhibit_evaluation_warnings += lhs == truthvalue_false_node;
      else if (tree_type == TRUTH_ORIF_EXPR)
	c_inhibit_evaluation_warnings += lhs == truthvalue_true_node;

      /* Extract another operand.  It may be the RHS of this expression
	 or the LHS of a new, higher priority expression.  */
      rhs = cp_parser_simple_cast_expression (parser);
      rhs_type = ERROR_MARK;

      /* Get another operator token.  Look up its precedence to avoid
	 building a useless (immediately popped) stack entry for common
	 cases such as 3 + 4 + 5 or 3 * 4 + 5.  */
      token = cp_lexer_peek_token (parser->lexer);
      lookahead_prec = TOKEN_PRECEDENCE (token);
      if (lookahead_prec > new_prec)
	{
	  /* ... and prepare to parse the RHS of the new, higher priority
	     expression.  Since precedence levels on the stack are
	     monotonically increasing, we do not have to care about
	     stack overflows.  */
	  sp->prec = prec;
	  sp->tree_type = tree_type;
	  sp->lhs = lhs;
	  sp->lhs_type = lhs_type;
	  sp++;
	  lhs = rhs;
	  lhs_type = rhs_type;
	  prec = new_prec;
	  new_prec = lookahead_prec;
	  goto get_rhs;

	 pop:
	  lookahead_prec = new_prec;
	  /* If the stack is not empty, we have parsed into LHS the right side
	     (`4' in the example above) of an expression we had suspended.
	     We can use the information on the stack to recover the LHS (`3')
	     from the stack together with the tree code (`MULT_EXPR'), and
	     the precedence of the higher level subexpression
	     (`PREC_ADDITIVE_EXPRESSION').  TOKEN is the CPP_PLUS token,
	     which will be used to actually build the additive expression.  */
	  --sp;
	  prec = sp->prec;
	  tree_type = sp->tree_type;
	  rhs = lhs;
	  rhs_type = lhs_type;
	  lhs = sp->lhs;
	  lhs_type = sp->lhs_type;
	}

      /* Undo the disabling of warnings done above.  */
      if (tree_type == TRUTH_ANDIF_EXPR)
	c_inhibit_evaluation_warnings -= lhs == truthvalue_false_node;
      else if (tree_type == TRUTH_ORIF_EXPR)
	c_inhibit_evaluation_warnings -= lhs == truthvalue_true_node;

      overload = NULL;
      /* ??? Currently we pass lhs_type == ERROR_MARK and rhs_type ==
	 ERROR_MARK for everything that is not a binary expression.
	 This makes warn_about_parentheses miss some warnings that
	 involve unary operators.  For unary expressions we should
	 pass the correct tree_code unless the unary expression was
	 surrounded by parentheses.
      */
      /* When asked not to fold the top-level expression, a top-level
	 comparison is built with bare build2 instead of
	 build_x_binary_op.  */
      if (no_toplevel_fold_p
	  && lookahead_prec <= prec
	  && sp == stack
	  && TREE_CODE_CLASS (tree_type) == tcc_comparison)
	lhs = build2 (tree_type, boolean_type_node, lhs, rhs);
      else
	lhs = build_x_binary_op (tree_type, lhs, lhs_type, rhs, rhs_type,
				 &overload, tf_warning_or_error);
      lhs_type = tree_type;

      /* If the binary operator required the use of an overloaded operator,
	 then this expression cannot be an integral constant-expression.
	 An overloaded operator can be used even if both operands are
	 otherwise permissible in an integral constant-expression if at
	 least one of the operands is of enumeration type.  */
      if (overload
	  && cp_parser_non_integral_constant_expression (parser,
							 NIC_OVERLOADED))
	return error_mark_node;
    }

  return lhs;
}
/* Parse the `? expression : assignment-expression' part of a
conditional-expression. The LOGICAL_OR_EXPR is the
logical-or-expression that started the conditional-expression.
Returns a representation of the entire conditional-expression.
This routine is used by cp_parser_assignment_expression.
? expression : assignment-expression
GNU Extensions:
? : assignment-expression */
static tree
cp_parser_question_colon_clause (cp_parser* parser, tree logical_or_expr)
{
  tree expr;
  tree assignment_expr;
  struct cp_token *token;

  /* Consume the `?' token.  */
  cp_lexer_consume_token (parser->lexer);
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_parser_allow_gnu_extensions_p (parser)
      && token->type == CPP_COLON)
    {
      /* GNU `x ?: y': the middle operand is omitted and the condition
	 is reused as the true branch; EXPR stays NULL_TREE to signal
	 that to build_x_conditional_expr.  */
      pedwarn (token->location, OPT_pedantic,
	       "ISO C++ does not allow ?: with omitted middle operand");
      /* Implicit true clause.  */
      expr = NULL_TREE;
      /* If the condition is known true, the false arm below is never
	 evaluated; suppress warnings while parsing it.  */
      c_inhibit_evaluation_warnings += logical_or_expr == truthvalue_true_node;
      warn_for_omitted_condop (token->location, logical_or_expr);
    }
  else
    {
      bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
      /* A `:' inside the middle operand belongs to the conditional;
	 don't let it be "corrected" to `::'.  */
      parser->colon_corrects_to_scope_p = false;
      /* Parse the expression.  */
      c_inhibit_evaluation_warnings += logical_or_expr == truthvalue_false_node;
      expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      /* Move the warning suppression from the (done) true arm to the
	 false arm: net effect is +1 iff the condition is known true,
	 -1 (undoing the line above) iff it is known false.  */
      c_inhibit_evaluation_warnings +=
	((logical_or_expr == truthvalue_true_node)
	 - (logical_or_expr == truthvalue_false_node));
      parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
    }

  /* The next token should be a `:'.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);
  /* Parse the assignment-expression.  */
  assignment_expr = cp_parser_assignment_expression (parser, /*cast_p=*/false, NULL);
  /* Undo the suppression added for the false arm.  */
  c_inhibit_evaluation_warnings -= logical_or_expr == truthvalue_true_node;

  /* Build the conditional-expression.  */
  return build_x_conditional_expr (logical_or_expr,
				   expr,
				   assignment_expr,
				   tf_warning_or_error);
}
/* Parse an assignment-expression.
assignment-expression:
conditional-expression
logical-or-expression assignment-operator assignment_expression
throw-expression
CAST_P is true if this expression is the target of a cast.
Returns a representation for the expression. */
static tree
cp_parser_assignment_expression (cp_parser* parser, bool cast_p,
				 cp_id_kind * pidk)
{
  tree expr;

  /* If the next token is the `throw' keyword, then we're looking at
     a throw-expression.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THROW))
    expr = cp_parser_throw_expression (parser);
  /* Otherwise, it must be that we are looking at a
     logical-or-expression.  */
  else
    {
      /* Parse the binary expressions (logical-or-expression).  */
      expr = cp_parser_binary_expression (parser, cast_p, false,
					  PREC_NOT_OPERATOR, pidk);
      /* If the next token is a `?' then we're actually looking at a
	 conditional-expression.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_QUERY))
	return cp_parser_question_colon_clause (parser, expr);
      else
	{
	  enum tree_code assignment_operator;

	  /* If it's an assignment-operator, we're using the second
	     production.  */
	  assignment_operator
	    = cp_parser_assignment_operator_opt (parser);
	  if (assignment_operator != ERROR_MARK)
	    {
	      bool non_constant_p;

	      /* Parse the right-hand side of the assignment.  */
	      tree rhs = cp_parser_initializer_clause (parser, &non_constant_p);

	      /* A braced-init-list on the RHS of an assignment is a
		 C++11 feature; warn about it in C++98 mode.  */
	      if (BRACE_ENCLOSED_INITIALIZER_P (rhs))
		maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);

	      /* An assignment may not appear in a
		 constant-expression.  */
	      if (cp_parser_non_integral_constant_expression (parser,
							      NIC_ASSIGNMENT))
		return error_mark_node;

	      /* Build the assignment expression.  */
	      expr = build_x_modify_expr (expr,
					  assignment_operator,
					  rhs,
					  tf_warning_or_error);
	    }
	}
    }

  return expr;
}
/* Parse an (optional) assignment-operator.
assignment-operator: one of
= *= /= %= += -= >>= <<= &= ^= |=
GNU Extension:
assignment-operator: one of
<?= >?=
If the next token is an assignment operator, the corresponding tree
code is returned, and the token is consumed. For example, for
`+=', PLUS_EXPR is returned. For `=' itself, the code returned is
NOP_EXPR. For `/', TRUNC_DIV_EXPR is returned; for `%',
TRUNC_MOD_EXPR is returned. If TOKEN is not an assignment
operator, ERROR_MARK is returned. */
static enum tree_code
cp_parser_assignment_operator_opt (cp_parser* parser)
{
  enum tree_code op = ERROR_MARK;
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* Map each assignment-operator token onto the tree code used to
     build the corresponding modify-expression; anything else leaves
     OP as ERROR_MARK.  */
  switch (token->type)
    {
    case CPP_EQ:	op = NOP_EXPR;		break;
    case CPP_MULT_EQ:	op = MULT_EXPR;		break;
    case CPP_DIV_EQ:	op = TRUNC_DIV_EXPR;	break;
    case CPP_MOD_EQ:	op = TRUNC_MOD_EXPR;	break;
    case CPP_PLUS_EQ:	op = PLUS_EXPR;		break;
    case CPP_MINUS_EQ:	op = MINUS_EXPR;	break;
    case CPP_RSHIFT_EQ:	op = RSHIFT_EXPR;	break;
    case CPP_LSHIFT_EQ:	op = LSHIFT_EXPR;	break;
    case CPP_AND_EQ:	op = BIT_AND_EXPR;	break;
    case CPP_XOR_EQ:	op = BIT_XOR_EXPR;	break;
    case CPP_OR_EQ:	op = BIT_IOR_EXPR;	break;

    default:
      /* Nothing else is an assignment operator.  */
      break;
    }

  /* If it was an assignment operator, consume it.  */
  if (op != ERROR_MARK)
    cp_lexer_consume_token (parser->lexer);

  return op;
}
/* Parse an expression.
expression:
assignment-expression
expression , assignment-expression
CAST_P is true if this expression is the target of a cast.
Returns a representation of the expression. */
static tree
cp_parser_expression (cp_parser* parser, bool cast_p, cp_id_kind * pidk)
{
  tree expression = NULL_TREE;

  for (;;)
    {
      /* Parse the next assignment-expression.  */
      tree assign_expr
	= cp_parser_assignment_expression (parser, cast_p, pidk);

      /* Fold the new operand into what we have parsed so far; the
	 very first operand simply becomes the expression.  */
      expression = (expression
		    ? build_x_compound_expr (expression, assign_expr,
					     tf_warning_or_error)
		    : assign_expr);

      /* Stop unless a `,' introduces another operand.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;

      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);

      /* A comma operator cannot appear in a constant-expression.  */
      if (cp_parser_non_integral_constant_expression (parser, NIC_COMMA))
	expression = error_mark_node;
    }

  return expression;
}
/* Parse a constant-expression.
constant-expression:
conditional-expression
If ALLOW_NON_CONSTANT_P a non-constant expression is silently
accepted. If ALLOW_NON_CONSTANT_P is true and the expression is not
constant, *NON_CONSTANT_P is set to TRUE. If ALLOW_NON_CONSTANT_P
is false, NON_CONSTANT_P should be NULL. */
static tree
cp_parser_constant_expression (cp_parser* parser,
			       bool allow_non_constant_p,
			       bool *non_constant_p)
{
  bool saved_integral_constant_expression_p;
  bool saved_allow_non_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;
  tree expression;

  /* It might seem that we could simply parse the
     conditional-expression, and then check to see if it were
     TREE_CONSTANT.  However, an expression that is TREE_CONSTANT is
     one that the compiler can figure out is constant, possibly after
     doing some simplifications or optimizations.  The standard has a
     precise definition of constant-expression, and we must honor
     that, even though it is somewhat more restrictive.

     For example:

       int i[(2, 3)];

     is not a legal declaration, because `(2, 3)' is not a
     constant-expression.  The `,' operator is forbidden in a
     constant-expression.  However, GCC's constant-folding machinery
     will fold this operation to an INTEGER_CST for `3'.  */

  /* Save the old settings.  */
  saved_integral_constant_expression_p = parser->integral_constant_expression_p;
  saved_allow_non_integral_constant_expression_p
    = parser->allow_non_integral_constant_expression_p;
  saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p;
  /* We are now parsing a constant-expression.  */
  parser->integral_constant_expression_p = true;
  parser->allow_non_integral_constant_expression_p
    = (allow_non_constant_p || cxx_dialect >= cxx0x);
  parser->non_integral_constant_expression_p = false;
  /* Although the grammar says "conditional-expression", we parse an
     "assignment-expression", which also permits "throw-expression"
     and the use of assignment operators.  In the case that
     ALLOW_NON_CONSTANT_P is false, we get better errors than we would
     otherwise.  In the case that ALLOW_NON_CONSTANT_P is true, it is
     actually essential that we look for an assignment-expression.
     For example, cp_parser_initializer_clauses uses this function to
     determine whether a particular assignment-expression is in fact
     constant.  */
  expression = cp_parser_assignment_expression (parser, /*cast_p=*/false, NULL);
  /* Restore the old settings.  */
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->allow_non_integral_constant_expression_p
    = saved_allow_non_integral_constant_expression_p;
  if (cxx_dialect >= cxx0x)
    {
      /* Require an rvalue constant expression here; that's what our
	 callers expect.  Reference constant expressions are handled
	 separately in e.g. cp_parser_template_argument.  */
      bool is_const = potential_rvalue_constant_expression (expression);
      parser->non_integral_constant_expression_p = !is_const;
      if (!is_const && !allow_non_constant_p)
	require_potential_rvalue_constant_expression (expression);
    }
  /* Report back whether the expression turned out non-constant, then
     restore the caller's flag.  */
  if (allow_non_constant_p)
    *non_constant_p = parser->non_integral_constant_expression_p;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;
  return expression;
}
/* Parse __builtin_offsetof.
offsetof-expression:
"__builtin_offsetof" "(" type-id "," offsetof-member-designator ")"
offsetof-member-designator:
id-expression
| offsetof-member-designator "." id-expression
| offsetof-member-designator "[" expression "]"
| offsetof-member-designator "->" id-expression */
static tree
cp_parser_builtin_offsetof (cp_parser *parser)
{
  int save_ice_p, save_non_ice_p;
  tree type, expr;
  cp_id_kind dummy;
  cp_token *token;

  /* We're about to accept non-integral-constant things, but will
     definitely yield an integral constant expression.  Save and
     restore these values around our local parsing.  */
  save_ice_p = parser->integral_constant_expression_p;
  save_non_ice_p = parser->non_integral_constant_expression_p;

  /* Consume the "__builtin_offsetof" token.  */
  cp_lexer_consume_token (parser->lexer);
  /* Consume the opening `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Parse the type-id.  */
  type = cp_parser_type_id (parser);
  /* Look for the `,'.  */
  cp_parser_require (parser, CPP_COMMA, RT_COMMA);
  token = cp_lexer_peek_token (parser->lexer);

  /* Build the (type *)null that begins the traditional offsetof macro.  */
  expr = build_static_cast (build_pointer_type (type), null_pointer_node,
			    tf_warning_or_error);

  /* Parse the offsetof-member-designator.  We begin as if we saw "expr->".  */
  expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DEREF, expr,
						 true, &dummy, token->location);
  /* Consume designator components until the closing `)'.  */
  while (true)
    {
      token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
	{
	case CPP_OPEN_SQUARE:
	  /* offsetof-member-designator "[" expression "]" */
	  expr = cp_parser_postfix_open_square_expression (parser, expr, true);
	  break;

	case CPP_DEREF:
	  /* offsetof-member-designator "->" identifier.  Rewrite
	     `x->y' as `x[0].y' by building the array reference here
	     and falling through to the `.' handling.  */
	  expr = grok_array_decl (expr, integer_zero_node);
	  /* FALLTHRU */

	case CPP_DOT:
	  /* offsetof-member-designator "." identifier */
	  cp_lexer_consume_token (parser->lexer);
	  expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DOT,
							 expr, true, &dummy,
							 token->location);
	  break;

	case CPP_CLOSE_PAREN:
	  /* Consume the ")" token.  */
	  cp_lexer_consume_token (parser->lexer);
	  goto success;

	default:
	  /* Error.  We know the following require will fail, but
	     that gives the proper error message.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  cp_parser_skip_to_closing_parenthesis (parser, true, false, true);
	  expr = error_mark_node;
	  goto failure;
	}
    }

 success:
  /* If we're processing a template, we can't finish the semantics yet.
     Otherwise we can fold the entire expression now.  */
  if (processing_template_decl)
    expr = build1 (OFFSETOF_EXPR, size_type_node, expr);
  else
    expr = finish_offsetof (expr);

 failure:
  parser->integral_constant_expression_p = save_ice_p;
  parser->non_integral_constant_expression_p = save_non_ice_p;

  return expr;
}
/* Parse a trait expression.
Returns a representation of the expression, the underlying type
of the type at issue when KEYWORD is RID_UNDERLYING_TYPE. */
static tree
cp_parser_trait_expr (cp_parser* parser, enum rid keyword)
{
  cp_trait_kind kind;
  tree type1, type2 = NULL_TREE;
  bool binary = false;
  cp_decl_specifier_seq decl_specs;

  /* Translate the trait keyword into the corresponding trait kind,
     noting which traits take a second type argument.  */
  switch (keyword)
    {
    case RID_HAS_NOTHROW_ASSIGN:      kind = CPTK_HAS_NOTHROW_ASSIGN;	   break;
    case RID_HAS_NOTHROW_CONSTRUCTOR: kind = CPTK_HAS_NOTHROW_CONSTRUCTOR; break;
    case RID_HAS_NOTHROW_COPY:	      kind = CPTK_HAS_NOTHROW_COPY;	   break;
    case RID_HAS_TRIVIAL_ASSIGN:      kind = CPTK_HAS_TRIVIAL_ASSIGN;	   break;
    case RID_HAS_TRIVIAL_CONSTRUCTOR: kind = CPTK_HAS_TRIVIAL_CONSTRUCTOR; break;
    case RID_HAS_TRIVIAL_COPY:	      kind = CPTK_HAS_TRIVIAL_COPY;	   break;
    case RID_HAS_TRIVIAL_DESTRUCTOR:  kind = CPTK_HAS_TRIVIAL_DESTRUCTOR;  break;
    case RID_HAS_VIRTUAL_DESTRUCTOR:  kind = CPTK_HAS_VIRTUAL_DESTRUCTOR;  break;
    case RID_IS_ABSTRACT:	      kind = CPTK_IS_ABSTRACT;		   break;
    case RID_IS_CLASS:		      kind = CPTK_IS_CLASS;		   break;
    case RID_IS_EMPTY:		      kind = CPTK_IS_EMPTY;		   break;
    case RID_IS_ENUM:		      kind = CPTK_IS_ENUM;		   break;
    case RID_IS_FINAL:		      kind = CPTK_IS_FINAL;		   break;
    case RID_IS_LITERAL_TYPE:	      kind = CPTK_IS_LITERAL_TYPE;	   break;
    case RID_IS_POD:		      kind = CPTK_IS_POD;		   break;
    case RID_IS_POLYMORPHIC:	      kind = CPTK_IS_POLYMORPHIC;	   break;
    case RID_IS_STD_LAYOUT:	      kind = CPTK_IS_STD_LAYOUT;	   break;
    case RID_IS_TRIVIAL:	      kind = CPTK_IS_TRIVIAL;		   break;
    case RID_IS_UNION:		      kind = CPTK_IS_UNION;		   break;
    case RID_UNDERLYING_TYPE:	      kind = CPTK_UNDERLYING_TYPE;	   break;
    case RID_BASES:		      kind = CPTK_BASES;		   break;
    case RID_DIRECT_BASES:	      kind = CPTK_DIRECT_BASES;		   break;

    /* These two traits take two type arguments.  */
    case RID_IS_BASE_OF:
      kind = CPTK_IS_BASE_OF;
      binary = true;
      break;
    case RID_IS_CONVERTIBLE_TO:
      kind = CPTK_IS_CONVERTIBLE_TO;
      binary = true;
      break;

    default:
      gcc_unreachable ();
    }

  /* Consume the token.  */
  cp_lexer_consume_token (parser->lexer);

  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

  /* Parse the first type argument.  */
  type1 = cp_parser_type_id (parser);
  if (type1 == error_mark_node)
    return error_mark_node;

  /* Build a trivial decl-specifier-seq and call grokdeclarator to
     figure out what type this is.  */
  clear_decl_specs (&decl_specs);
  decl_specs.type = type1;
  type1 = grokdeclarator (NULL, &decl_specs, TYPENAME,
			  /*initialized=*/0, /*attrlist=*/NULL);

  if (binary)
    {
      cp_parser_require (parser, CPP_COMMA, RT_COMMA);

      /* Parse the second type argument the same way.  */
      type2 = cp_parser_type_id (parser);
      if (type2 == error_mark_node)
	return error_mark_node;

      clear_decl_specs (&decl_specs);
      decl_specs.type = type2;
      type2 = grokdeclarator (NULL, &decl_specs, TYPENAME,
			      /*initialized=*/0, /*attrlist=*/NULL);
    }

  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  /* Complete the trait expression, which may mean either processing
     the trait expr now or saving it for template instantiation.  */
  switch (kind)
    {
    case CPTK_UNDERLYING_TYPE:
      return finish_underlying_type (type1);
    case CPTK_BASES:
      return finish_bases (type1, false);
    case CPTK_DIRECT_BASES:
      return finish_bases (type1, true);
    default:
      return finish_trait_expr (kind, type1, type2);
    }
}
/* Lambdas that appear in variable initializer or default argument scope
get that in their mangling, so we need to record it. We might as well
use the count for function and namespace scopes as well. */
static GTY(()) tree lambda_scope;
static GTY(()) int lambda_count;
typedef struct GTY(()) tree_int
{
tree t;
int i;
} tree_int;
DEF_VEC_O(tree_int);
DEF_VEC_ALLOC_O(tree_int,gc);
static GTY(()) VEC(tree_int,gc) *lambda_scope_stack;
/* Push DECL as the current lambda scope, saving the previous scope and
   discriminator count so finish_lambda_scope can restore them.  */
static void
start_lambda_scope (tree decl)
{
  tree_int saved;

  gcc_assert (decl);

  /* Once we're inside a function, we ignore other scopes and just push
     the function again so that popping works properly.  */
  if (current_function_decl != NULL_TREE
      && TREE_CODE (decl) != FUNCTION_DECL)
    decl = current_function_decl;

  /* Remember what was in effect before this scope.  */
  saved.t = lambda_scope;
  saved.i = lambda_count;
  VEC_safe_push (tree_int, gc, lambda_scope_stack, &saved);

  /* Don't reset the count if we're still in the same function.  */
  if (decl != lambda_scope)
    {
      lambda_scope = decl;
      lambda_count = 0;
    }
}
/* Stamp LAMBDA with the current lambda scope and a per-scope
   discriminator (used in mangling -- see the comment above
   lambda_scope).  */
static void
record_lambda_scope (tree lambda)
{
  LAMBDA_EXPR_EXTRA_SCOPE (lambda) = lambda_scope;
  LAMBDA_EXPR_DISCRIMINATOR (lambda) = lambda_count++;
}
/* Pop the innermost entry pushed by start_lambda_scope, restoring the
   previous lambda scope and discriminator count.  */
static void
finish_lambda_scope (void)
{
  tree_int *p = VEC_last (tree_int, lambda_scope_stack);
  /* Only restore if the scope actually changed (start_lambda_scope
     leaves the count alone when re-entering the same function).  */
  if (lambda_scope != p->t)
    {
      lambda_scope = p->t;
      lambda_count = p->i;
    }
  VEC_pop (tree_int, lambda_scope_stack);
}
/* Parse a lambda expression.
lambda-expression:
lambda-introducer lambda-declarator [opt] compound-statement
Returns a representation of the expression. */
static tree
cp_parser_lambda_expression (cp_parser* parser)
{
  tree lambda_expr = build_lambda_expr ();
  tree type;
  bool ok;

  LAMBDA_EXPR_LOCATION (lambda_expr)
    = cp_lexer_peek_token (parser->lexer)->location;

  if (cp_unevaluated_operand)
    error_at (LAMBDA_EXPR_LOCATION (lambda_expr),
	      "lambda-expression in unevaluated context");

  /* We may be in the middle of deferred access check.  Disable
     it now.  */
  push_deferring_access_checks (dk_no_deferred);

  /* Parse the `[...]' introducer; this records the captures on
     LAMBDA_EXPR.  */
  cp_parser_lambda_introducer (parser, lambda_expr);

  type = begin_lambda_type (lambda_expr);
  if (type == error_mark_node)
    return error_mark_node;

  record_lambda_scope (lambda_expr);

  /* Do this again now that LAMBDA_EXPR_EXTRA_SCOPE is set.  */
  determine_visibility (TYPE_NAME (type));

  /* Now that we've started the type, add the capture fields for any
     explicit captures.  */
  register_capture_members (LAMBDA_EXPR_CAPTURE_LIST (lambda_expr));

  {
    /* Inside the class, surrounding template-parameter-lists do not apply.  */
    unsigned int saved_num_template_parameter_lists
        = parser->num_template_parameter_lists;
    unsigned char in_statement = parser->in_statement;
    bool in_switch_statement_p = parser->in_switch_statement_p;

    parser->num_template_parameter_lists = 0;
    parser->in_statement = 0;
    parser->in_switch_statement_p = false;

    /* By virtue of defining a local class, a lambda expression has access to
       the private variables of enclosing classes.  */

    ok = cp_parser_lambda_declarator_opt (parser, lambda_expr);

    if (ok)
      cp_parser_lambda_body (parser, lambda_expr);
    else if (cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
      /* The declarator was bad; skip the body for error recovery.  */
      cp_parser_skip_to_end_of_block_or_statement (parser);

    /* The capture list was built up in reverse order; fix that now.  */
    {
      tree newlist = NULL_TREE;
      tree elt, next;

      for (elt = LAMBDA_EXPR_CAPTURE_LIST (lambda_expr);
	   elt; elt = next)
	{
	  next = TREE_CHAIN (elt);
	  TREE_CHAIN (elt) = newlist;
	  newlist = elt;
	}
      LAMBDA_EXPR_CAPTURE_LIST (lambda_expr) = newlist;
    }

    if (ok)
      maybe_add_lambda_conv_op (type);

    type = finish_struct (type, /*attributes=*/NULL_TREE);

    parser->num_template_parameter_lists = saved_num_template_parameter_lists;
    parser->in_statement = in_statement;
    parser->in_switch_statement_p = in_switch_statement_p;
  }

  pop_deferring_access_checks ();

  /* This field is only used during parsing of the lambda.  */
  LAMBDA_EXPR_THIS_CAPTURE (lambda_expr) = NULL_TREE;

  /* This lambda shouldn't have any proxies left at this point.  */
  gcc_assert (LAMBDA_EXPR_PENDING_PROXIES (lambda_expr) == NULL);
  /* And now that we're done, push proxies for an enclosing lambda.  */
  insert_pending_capture_proxies ();

  if (ok)
    return build_lambda_object (lambda_expr);
  else
    return error_mark_node;
}
/* Parse the beginning of a lambda expression.
lambda-introducer:
[ lambda-capture [opt] ]
LAMBDA_EXPR is the current representation of the lambda expression. */
/* Fix: the "capture of non-variable" diagnostic carried a stray
   trailing space inside the format string ("...%qD "), which was
   emitted verbatim at the end of the error message; diagnostics must
   not end in whitespace.  Otherwise byte-identical code, with added
   commentary.  */
static void
cp_parser_lambda_introducer (cp_parser* parser, tree lambda_expr)
{
  /* Need commas after the first capture.  */
  bool first = true;

  /* Eat the leading `['.  */
  cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE);

  /* Record default capture mode.  "[&" "[=" "[&," "[=,"  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_AND)
      && cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_NAME)
    LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) = CPLD_REFERENCE;
  else if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) = CPLD_COPY;
  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) != CPLD_NONE)
    {
      cp_lexer_consume_token (parser->lexer);
      first = false;
    }

  /* Parse captures until the closing `]'.  */
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_SQUARE))
    {
      cp_token* capture_token;
      tree capture_id;
      tree capture_init_expr;
      cp_id_kind idk = CP_ID_KIND_NONE;
      bool explicit_init_p = false;

      enum capture_kind_type
      {
	BY_COPY,
	BY_REFERENCE
      };
      enum capture_kind_type capture_kind = BY_COPY;

      if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
	{
	  error ("expected end of capture-list");
	  return;
	}

      if (first)
	first = false;
      else
	cp_parser_require (parser, CPP_COMMA, RT_COMMA);

      /* Possibly capture `this'.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THIS))
	{
	  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
	  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_COPY)
	    pedwarn (loc, 0, "explicit by-copy capture of %<this%> redundant "
		     "with by-copy capture default");
	  cp_lexer_consume_token (parser->lexer);
	  add_capture (lambda_expr,
		       /*id=*/this_identifier,
		       /*initializer=*/finish_this_expr(),
		       /*by_reference_p=*/false,
		       explicit_init_p);
	  continue;
	}

      /* Remember whether we want to capture as a reference or not.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_AND))
	{
	  capture_kind = BY_REFERENCE;
	  cp_lexer_consume_token (parser->lexer);
	}

      /* Get the identifier.  */
      capture_token = cp_lexer_peek_token (parser->lexer);
      capture_id = cp_parser_identifier (parser);

      if (capture_id == error_mark_node)
	/* Would be nice to have a cp_parser_skip_to_closing_x for general
	   delimiters, but I modified this to stop on unnested ']' as well.  It
	   was already changed to stop on unnested '}', so the
	   "closing_parenthesis" name is no more misleading with my change.  */
	{
	  cp_parser_skip_to_closing_parenthesis (parser,
						 /*recovering=*/true,
						 /*or_comma=*/true,
						 /*consume_paren=*/true);
	  break;
	}

      /* Find the initializer for this capture.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	{
	  /* An explicit expression exists.  */
	  cp_lexer_consume_token (parser->lexer);
	  pedwarn (input_location, OPT_pedantic,
		   "ISO C++ does not allow initializers "
		   "in lambda expression capture lists");
	  capture_init_expr = cp_parser_assignment_expression (parser,
							       /*cast_p=*/true,
							       &idk);
	  explicit_init_p = true;
	}
      else
	{
	  const char* error_msg;

	  /* Turn the identifier into an id-expression.  */
	  capture_init_expr
	    = cp_parser_lookup_name
		(parser,
		 capture_id,
		 none_type,
		 /*is_template=*/false,
		 /*is_namespace=*/false,
		 /*check_dependency=*/true,
		 /*ambiguous_decls=*/NULL,
		 capture_token->location);

	  if (capture_init_expr == error_mark_node)
	    {
	      unqualified_name_lookup_error (capture_id);
	      continue;
	    }
	  else if (DECL_P (capture_init_expr)
		   && (TREE_CODE (capture_init_expr) != VAR_DECL
		       && TREE_CODE (capture_init_expr) != PARM_DECL))
	    {
	      /* Only variables and parameters can be captured.  */
	      error_at (capture_token->location,
			"capture of non-variable %qD",
			capture_init_expr);
	      inform (0, "%q+#D declared here", capture_init_expr);
	      continue;
	    }
	  if (TREE_CODE (capture_init_expr) == VAR_DECL
	      && decl_storage_duration (capture_init_expr) != dk_auto)
	    {
	      pedwarn (capture_token->location, 0, "capture of variable "
		       "%qD with non-automatic storage duration",
		       capture_init_expr);
	      inform (0, "%q+#D declared here", capture_init_expr);
	      continue;
	    }

	  capture_init_expr
	    = finish_id_expression
		(capture_id,
		 capture_init_expr,
		 parser->scope,
		 &idk,
		 /*integral_constant_expression_p=*/false,
		 /*allow_non_integral_constant_expression_p=*/false,
		 /*non_integral_constant_expression_p=*/NULL,
		 /*template_p=*/false,
		 /*done=*/true,
		 /*address_p=*/false,
		 /*template_arg_p=*/false,
		 &error_msg,
		 capture_token->location);
	}

      /* Warn about explicit captures that merely repeat the default
	 capture mode.  */
      if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) != CPLD_NONE
	  && !explicit_init_p)
	{
	  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_COPY
	      && capture_kind == BY_COPY)
	    pedwarn (capture_token->location, 0, "explicit by-copy capture "
		     "of %qD redundant with by-copy capture default",
		     capture_id);
	  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_REFERENCE
	      && capture_kind == BY_REFERENCE)
	    pedwarn (capture_token->location, 0, "explicit by-reference "
		     "capture of %qD redundant with by-reference capture "
		     "default", capture_id);
	}

      add_capture (lambda_expr,
		   capture_id,
		   capture_init_expr,
		   /*by_reference_p=*/capture_kind == BY_REFERENCE,
		   explicit_init_p);
    }

  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
}
/* Parse the (optional) middle of a lambda expression.
lambda-declarator:
( parameter-declaration-clause [opt] )
attribute-specifier [opt]
mutable [opt]
exception-specification [opt]
lambda-return-type-clause [opt]
LAMBDA_EXPR is the current representation of the lambda expression. */
static bool
cp_parser_lambda_declarator_opt (cp_parser* parser, tree lambda_expr)
{
  /* 5.1.1.4 of the standard says:
       If a lambda-expression does not include a lambda-declarator, it is as if
       the lambda-declarator were ().
     This means an empty parameter list, no attributes, and no exception
     specification. */
  tree param_list = void_list_node;
  tree attributes = NULL_TREE;
  tree exception_spec = NULL_TREE;
  tree t;

  /* The lambda-declarator is optional, but must begin with an opening
     parenthesis if present. */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);

      /* The parameters get their own binding scope; it stays open until
	 after the trailing-return-type below, for the sake of decltype.  */
      begin_scope (sk_function_parms, /*entity=*/NULL_TREE);

      /* Parse parameters. */
      param_list = cp_parser_parameter_declaration_clause (parser);

      /* Default arguments shall not be specified in the
	 parameter-declaration-clause of a lambda-declarator. */
      for (t = param_list; t; t = TREE_CHAIN (t))
	if (TREE_PURPOSE (t))
	  pedwarn (DECL_SOURCE_LOCATION (TREE_VALUE (t)), OPT_pedantic,
		   "default argument specified for lambda parameter");

      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

      attributes = cp_parser_attributes_opt (parser);

      /* Parse optional `mutable' keyword. */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_MUTABLE))
	{
	  cp_lexer_consume_token (parser->lexer);
	  LAMBDA_EXPR_MUTABLE_P (lambda_expr) = 1;
	}

      /* Parse optional exception specification. */
      exception_spec = cp_parser_exception_specification_opt (parser);

      /* Parse optional trailing return type, introduced by `->'.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_DEREF))
	{
	  cp_lexer_consume_token (parser->lexer);
	  LAMBDA_EXPR_RETURN_TYPE (lambda_expr) = cp_parser_type_id (parser);
	}

      /* The function parameters must be in scope all the way until after the
	 trailing-return-type in case of decltype.  Now pop their bindings
	 and close the scope.  */
      for (t = current_binding_level->names; t; t = DECL_CHAIN (t))
	pop_binding (DECL_NAME (t), t);

      leave_scope ();
    }

  /* Create the function call operator.

     Messing with declarators like this is no uglier than building up the
     FUNCTION_DECL by hand, and this is less likely to get out of sync with
     other code. */
  {
    cp_decl_specifier_seq return_type_specs;
    cp_declarator* declarator;
    tree fco;
    int quals;
    void *p;

    clear_decl_specs (&return_type_specs);
    if (LAMBDA_EXPR_RETURN_TYPE (lambda_expr))
      return_type_specs.type = LAMBDA_EXPR_RETURN_TYPE (lambda_expr);
    else
      /* Maybe we will deduce the return type later, but we can use void
	 as a placeholder return type anyways. */
      return_type_specs.type = void_type_node;

    p = obstack_alloc (&declarator_obstack, 0);

    declarator = make_id_declarator (NULL_TREE, ansi_opname (CALL_EXPR),
				     sfk_none);

    /* Without `mutable', operator() is const-qualified so that captured
       copies cannot be modified.  */
    quals = (LAMBDA_EXPR_MUTABLE_P (lambda_expr)
	     ? TYPE_UNQUALIFIED : TYPE_QUAL_CONST);
    declarator = make_call_declarator (declarator, param_list, quals,
				       VIRT_SPEC_UNSPECIFIED,
				       exception_spec,
				       /*late_return_type=*/NULL_TREE);
    declarator->id_loc = LAMBDA_EXPR_LOCATION (lambda_expr);

    fco = grokmethod (&return_type_specs,
		      declarator,
		      attributes);
    if (fco != error_mark_node)
      {
	DECL_INITIALIZED_IN_CLASS_P (fco) = 1;
	DECL_ARTIFICIAL (fco) = 1;
	/* Give the object parameter a different name. */
	DECL_NAME (DECL_ARGUMENTS (fco)) = get_identifier ("__closure");
      }

    finish_member_declaration (fco);

    obstack_free (&declarator_obstack, p);

    /* True on success; error_mark_node means grokmethod failed.  */
    return (fco != error_mark_node);
  }
}
/* Parse the body of a lambda expression, which is simply
compound-statement
but which requires special handling.
LAMBDA_EXPR is the current representation of the lambda expression. */
static void
cp_parser_lambda_body (cp_parser* parser, tree lambda_expr)
{
  /* A lambda may appear inside another function (nested) or at
     namespace scope, e.g. in a default member initializer.  */
  bool nested = (current_function_decl != NULL_TREE);
  bool local_variables_forbidden_p = parser->local_variables_forbidden_p;

  if (nested)
    push_function_context ();
  else
    /* Still increment function_depth so that we don't GC in the
       middle of an expression.  */
    ++function_depth;

  /* Clear this in case we're in the middle of a default argument.  */
  parser->local_variables_forbidden_p = false;

  /* Finish the function call operator
     - class_specifier
     + late_parsing_for_member
     + function_definition_after_declarator
     + ctor_initializer_opt_and_function_body */
  {
    tree fco = lambda_function (lambda_expr);
    tree body;
    bool done = false;
    tree compound_stmt;
    tree cap;

    /* Let the front end know that we are going to be defining this
       function.  */
    start_preparsed_function (fco,
			      NULL_TREE,
			      SF_PRE_PARSED | SF_INCLASS_INLINE);

    start_lambda_scope (fco);
    body = begin_function_body ();

    /* If the opening brace is missing, skip straight to cleanup.  */
    if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
      goto out;

    /* Push the proxies for any explicit captures.  */
    for (cap = LAMBDA_EXPR_CAPTURE_LIST (lambda_expr); cap;
	 cap = TREE_CHAIN (cap))
      build_capture_proxy (TREE_PURPOSE (cap));

    compound_stmt = begin_compound_stmt (0);

    /* 5.1.1.4 of the standard says:
	 If a lambda-expression does not include a trailing-return-type, it
	 is as if the trailing-return-type denotes the following type:
	  * if the compound-statement is of the form
	       { return attribute-specifier [opt] expression ; }
	     the type of the returned expression after lvalue-to-rvalue
	     conversion (_conv.lval_ 4.1), array-to-pointer conversion
	     (_conv.array_ 4.2), and function-to-pointer conversion
	     (_conv.func_ 4.3);
	  * otherwise, void.  */

    /* In a lambda that has neither a lambda-return-type-clause
       nor a deducible form, errors should be reported for return statements
       in the body.  Since we used void as the placeholder return type, parsing
       the body as usual will give such desired behavior.  */
    if (!LAMBDA_EXPR_RETURN_TYPE (lambda_expr)
	&& cp_lexer_peek_nth_token (parser->lexer, 1)->keyword == RID_RETURN
	&& cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SEMICOLON)
      {
	tree expr = NULL_TREE;
	cp_id_kind idk = CP_ID_KIND_NONE;

	/* Parse tentatively in case there's more after the initial return
	   statement.  */
	cp_parser_parse_tentatively (parser);

	cp_parser_require_keyword (parser, RID_RETURN, RT_RETURN);

	expr = cp_parser_expression (parser, /*cast_p=*/false, &idk);

	/* The deducible form is exactly `{ return expr; }': require the
	   semicolon and the closing brace before committing.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
	cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

	if (cp_parser_parse_definitely (parser))
	  {
	    apply_lambda_return_type (lambda_expr, lambda_return_type (expr));

	    /* Will get error here if type not deduced yet.  */
	    finish_return_stmt (expr);

	    done = true;
	  }
      }

    /* Not the single-return form (or the tentative parse failed and was
       rewound): parse the body as an ordinary statement sequence.  */
    if (!done)
      {
	if (!LAMBDA_EXPR_RETURN_TYPE (lambda_expr))
	  LAMBDA_EXPR_DEDUCE_RETURN_TYPE_P (lambda_expr) = true;
	while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
	  cp_parser_label_declaration (parser);
	cp_parser_statement_seq_opt (parser, NULL_TREE);
	cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
	LAMBDA_EXPR_DEDUCE_RETURN_TYPE_P (lambda_expr) = false;
      }

    finish_compound_stmt (compound_stmt);

  out:
    finish_function_body (body);
    finish_lambda_scope ();

    /* Finish the function and generate code for it if necessary.  */
    expand_or_defer_fn (finish_function (/*inline*/2));
  }

  /* Restore the parser and function context saved on entry.  */
  parser->local_variables_forbidden_p = local_variables_forbidden_p;
  if (nested)
    pop_function_context();
  else
    --function_depth;
}
/* Statements [gram.stmt.stmt] */
/* Parse a statement.
statement:
labeled-statement
expression-statement
compound-statement
selection-statement
iteration-statement
jump-statement
declaration-statement
try-block
TM Extension:
statement:
atomic-statement
IN_COMPOUND is true when the statement is nested inside a
cp_parser_compound_statement; this matters for certain pragmas.
If IF_P is not NULL, *IF_P is set to indicate whether the statement
is a (possibly labeled) if statement which is not enclosed in braces
and has an else clause. This is used to implement -Wparentheses. */
static void
cp_parser_statement (cp_parser* parser, tree in_statement_expr,
		     bool in_compound, bool *if_p)
{
  tree statement;
  cp_token *token;
  location_t statement_location;

  /* Labels are handled by parsing the label and then jumping back here
     ("tail recursion" via goto) to parse the labeled statement.  */
 restart:
  if (if_p != NULL)
    *if_p = false;
  /* There is no statement yet. */
  statement = NULL_TREE;
  /* Peek at the next token. */
  token = cp_lexer_peek_token (parser->lexer);
  /* Remember the location of the first token in the statement. */
  statement_location = token->location;
  /* If this is a keyword, then that will often determine what kind of
     statement we have. */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
	{
	case RID_CASE:
	case RID_DEFAULT:
	  /* Looks like a labeled-statement with a case label.
	     Parse the label, and then use tail recursion to parse
	     the statement. */
	  cp_parser_label_for_labeled_statement (parser);
	  goto restart;

	case RID_IF:
	case RID_SWITCH:
	  statement = cp_parser_selection_statement (parser, if_p);
	  break;

	case RID_WHILE:
	case RID_DO:
	case RID_FOR:
	  statement = cp_parser_iteration_statement (parser);
	  break;

	case RID_BREAK:
	case RID_CONTINUE:
	case RID_RETURN:
	case RID_GOTO:
	  statement = cp_parser_jump_statement (parser);
	  break;

	  /* Objective-C++ exception-handling constructs. */
	case RID_AT_TRY:
	case RID_AT_CATCH:
	case RID_AT_FINALLY:
	case RID_AT_SYNCHRONIZED:
	case RID_AT_THROW:
	  statement = cp_parser_objc_statement (parser);
	  break;

	case RID_TRY:
	  statement = cp_parser_try_block (parser);
	  break;

	case RID_NAMESPACE:
	  /* This must be a namespace alias definition. */
	  cp_parser_declaration_statement (parser);
	  return;

	  /* Transactional-memory extension statements.  */
	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  statement = cp_parser_transaction (parser, keyword);
	  break;
	case RID_TRANSACTION_CANCEL:
	  statement = cp_parser_transaction_cancel (parser);
	  break;

	default:
	  /* It might be a keyword like `int' that can start a
	     declaration-statement. */
	  break;
	}
    }
  else if (token->type == CPP_NAME)
    {
      /* If the next token is a `:', then we are looking at a
	 labeled-statement. */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token->type == CPP_COLON)
	{
	  /* Looks like a labeled-statement with an ordinary label.
	     Parse the label, and then use tail recursion to parse
	     the statement. */
	  cp_parser_label_for_labeled_statement (parser);
	  goto restart;
	}
    }
  /* Anything that starts with a `{' must be a compound-statement. */
  else if (token->type == CPP_OPEN_BRACE)
    statement = cp_parser_compound_statement (parser, NULL, false, false);
  /* CPP_PRAGMA is a #pragma inside a function body, which constitutes
     a statement all its own. */
  else if (token->type == CPP_PRAGMA)
    {
      /* Only certain OpenMP pragmas are attached to statements, and thus
	 are considered statements themselves.  All others are not.  In
	 the context of a compound, accept the pragma as a "statement" and
	 return so that we can check for a close brace.  Otherwise we
	 require a real statement and must go back and read one. */
      if (in_compound)
	cp_parser_pragma (parser, pragma_compound);
      else if (!cp_parser_pragma (parser, pragma_stmt))
	goto restart;
      return;
    }
  else if (token->type == CPP_EOF)
    {
      /* Ran out of input without seeing a statement.  */
      cp_parser_error (parser, "expected statement");
      return;
    }

  /* Everything else must be a declaration-statement or an
     expression-statement.  Try for the declaration-statement
     first, unless we are looking at a `;', in which case we know that
     we have an expression-statement. */
  if (!statement)
    {
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  cp_parser_parse_tentatively (parser);
	  /* Try to parse the declaration-statement. */
	  cp_parser_declaration_statement (parser);
	  /* If that worked, we're done. */
	  if (cp_parser_parse_definitely (parser))
	    return;
	}
      /* Look for an expression-statement instead. */
      statement = cp_parser_expression_statement (parser, in_statement_expr);
    }

  /* Set the line number for the statement. */
  if (statement && STATEMENT_CODE_P (TREE_CODE (statement)))
    SET_EXPR_LOCATION (statement, statement_location);
}
/* Parse the label for a labeled-statement, i.e.
identifier :
case constant-expression :
default :
GNU Extension:
case constant-expression ... constant-expression : statement
When a label is parsed without errors, the label is added to the
parse tree by the finish_* functions, so this function doesn't
have to return the label. */
static void
cp_parser_label_for_labeled_statement (cp_parser* parser)
{
  cp_token *token;
  tree label = NULL_TREE;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

  /* The next token should be an identifier.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME
      && token->type != CPP_KEYWORD)
    {
      cp_parser_error (parser, "expected labeled-statement");
      return;
    }

  /* The `:' here terminates a label, so don't offer a `::'
     typo-correction for it.  */
  parser->colon_corrects_to_scope_p = false;
  switch (token->keyword)
    {
    case RID_CASE:
      {
	tree expr, expr_hi;
	cp_token *ellipsis;

	/* Consume the `case' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Parse the constant-expression.  */
	expr = cp_parser_constant_expression (parser,
					      /*allow_non_constant_p=*/false,
					      NULL);

	/* GNU extension: `case lo ... hi:' ranges.  */
	ellipsis = cp_lexer_peek_token (parser->lexer);
	if (ellipsis->type == CPP_ELLIPSIS)
	  {
	    /* Consume the `...' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    expr_hi =
	      cp_parser_constant_expression (parser,
					     /*allow_non_constant_p=*/false,
					     NULL);
	    /* We don't need to emit warnings here, as the common code
	       will do this for us.  */
	  }
	else
	  expr_hi = NULL_TREE;

	if (parser->in_switch_statement_p)
	  finish_case_label (token->location, expr, expr_hi);
	else
	  error_at (token->location,
		    "case label %qE not within a switch statement",
		    expr);
      }
      break;

    case RID_DEFAULT:
      /* Consume the `default' token.  */
      cp_lexer_consume_token (parser->lexer);

      if (parser->in_switch_statement_p)
	finish_case_label (token->location, NULL_TREE, NULL_TREE);
      else
	/* The token here is `default', so name it in the diagnostic;
	   the old text said "case label", which was misleading.  */
	error_at (token->location,
		  "%<default%> label not within a switch statement");
      break;

    default:
      /* Anything else must be an ordinary label.  */
      label = finish_label_stmt (cp_parser_identifier (parser));
      break;
    }

  /* Require the `:' token.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);

  /* An ordinary label may optionally be followed by attributes.
     However, this is only permitted if the attributes are then
     followed by a semicolon.  This is because, for backward
     compatibility, when parsing
       lab: __attribute__ ((unused)) int i;
     we want the attribute to attach to "i", not "lab".  */
  if (label != NULL_TREE
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
    {
      tree attrs;

      cp_parser_parse_tentatively (parser);
      attrs = cp_parser_attributes_opt (parser);
      if (attrs == NULL_TREE
	  || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	cp_parser_abort_tentative_parse (parser);
      else if (!cp_parser_parse_definitely (parser))
	;
      else
	cplus_decl_attributes (&label, attrs, 0);
    }

  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
}
/* Parse an expression-statement.
expression-statement:
expression [opt] ;
Returns the new EXPR_STMT -- or NULL_TREE if the expression
statement consists of nothing more than an `;'. IN_STATEMENT_EXPR_P
indicates whether this expression-statement is part of an
expression statement. */
static tree
cp_parser_expression_statement (cp_parser* parser, tree in_statement_expr)
{
  tree statement = NULL_TREE;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is a ';', then there is no expression
     statement. */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    statement = cp_parser_expression (parser, /*cast_p=*/false, NULL);

  /* Give a helpful message for "A<T>::type t;" and the like.  (We only
     get here with a non-semicolon lookahead if the expression did not
     consume the whole statement, so STATEMENT is non-null then.)  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)
      && !cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      if (TREE_CODE (statement) == SCOPE_REF)
	error_at (token->location, "need %<typename%> before %qE because "
		  "%qT is a dependent scope",
		  statement, TREE_OPERAND (statement, 0));
      else if (is_overloaded_fn (statement)
	       && DECL_CONSTRUCTOR_P (get_first_fn (statement)))
	{
	  /* A::A a; */
	  tree fn = get_first_fn (statement);
	  error_at (token->location,
		    "%<%T::%D%> names the constructor, not the type",
		    DECL_CONTEXT (fn), DECL_NAME (fn));
	}
    }

  /* Consume the final `;'. */
  cp_parser_consume_semicolon_at_end_of_statement (parser);

  if (in_statement_expr
      && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
    /* This is the final expression statement of a statement
       expression. */
    statement = finish_stmt_expr_expr (statement, in_statement_expr);
  else if (statement)
    statement = finish_expr_stmt (statement);
  else
    /* Bare `;': record an empty statement.  */
    finish_stmt ();

  return statement;
}
/* Parse a compound-statement.
compound-statement:
{ statement-seq [opt] }
GNU extension:
compound-statement:
{ label-declaration-seq [opt] statement-seq [opt] }
label-declaration-seq:
label-declaration
label-declaration-seq label-declaration
Returns a tree representing the statement. */
/* Parse one compound-statement: `{ [__label__ ...] statement-seq [opt] }'.
   IN_STATEMENT_EXPR is passed through to the statement sequence;
   IN_TRY selects BCS_TRY_BLOCK; FUNCTION_BODY suppresses the constexpr
   pedwarn for the outermost body.  Returns the statement tree, or
   error_mark_node when the opening brace is missing.  */

static tree
cp_parser_compound_statement (cp_parser *parser, tree in_statement_expr,
			      bool in_try, bool function_body)
{
  tree stmt;

  /* Without a `{' there is nothing to parse.  */
  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    return error_mark_node;

  /* In C++11, a constexpr function body may not contain a nested
     compound-statement.  */
  if (DECL_DECLARED_CONSTEXPR_P (current_function_decl)
      && !function_body)
    pedwarn (input_location, OPT_pedantic,
	     "compound-statement in constexpr function");

  stmt = begin_compound_stmt (in_try ? BCS_TRY_BLOCK : 0);

  /* GNU extension: leading `__label__' declarations.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
    cp_parser_label_declaration (parser);

  cp_parser_statement_seq_opt (parser, in_statement_expr);

  finish_compound_stmt (stmt);
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

  return stmt;
}
/* Parse an (optional) statement-seq.
statement-seq:
statement
statement-seq [opt] statement */
/* Parse an (optional) statement-seq: zero or more statements, stopping
   at any token that cannot begin another one.  IN_STATEMENT_EXPR is
   forwarded to each statement.  */

static void
cp_parser_statement_seq_opt (cp_parser* parser, tree in_statement_expr)
{
  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);

      /* A `}', end of input, end of a pragma, or a stray Objective-C
	 `@end' all terminate the sequence.  */
      if (tok->type == CPP_CLOSE_BRACE
	  || tok->type == CPP_EOF
	  || tok->type == CPP_PRAGMA_EOL
	  || (tok->type == CPP_KEYWORD && tok->keyword == RID_AT_END))
	break;

      /* An `else' here either belongs to an enclosing `if' (stop and
	 let it take the token) or is an orphan (diagnose, consume it,
	 and keep going).  */
      if (tok->type == CPP_KEYWORD && tok->keyword == RID_ELSE)
	{
	  if (parser->in_statement & IN_IF_STMT)
	    break;
	  tok = cp_lexer_consume_token (parser->lexer);
	  error_at (tok->location, "%<else%> without a previous %<if%>");
	}

      /* Parse the next statement.  */
      cp_parser_statement (parser, in_statement_expr, true, NULL);
    }
}
/* Parse a selection-statement.
selection-statement:
if ( condition ) statement
if ( condition ) statement else statement
switch ( condition ) statement
Returns the new IF_STMT or SWITCH_STMT.
If IF_P is not NULL, *IF_P is set to indicate whether the statement
is a (possibly labeled) if statement which is not enclosed in
braces and has an else clause. This is used to implement
-Wparentheses. */
static tree
cp_parser_selection_statement (cp_parser* parser, bool *if_p)
{
  cp_token *token;
  enum rid keyword;

  if (if_p != NULL)
    *if_p = false;

  /* Peek at the next token. */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_SELECT);

  /* See what kind of keyword it is. */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_IF:
    case RID_SWITCH:
      {
	tree statement;
	tree condition;

	/* Look for the `('. */
	if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	  {
	    cp_parser_skip_to_end_of_statement (parser);
	    return error_mark_node;
	  }

	/* Begin the selection-statement. */
	if (keyword == RID_IF)
	  statement = begin_if_stmt ();
	else
	  statement = begin_switch_stmt ();

	/* Parse the condition. */
	condition = cp_parser_condition (parser);

	/* Look for the `)'.  On failure, skip to the matching close
	   paren so parsing can resume after the condition.  */
	if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	  cp_parser_skip_to_closing_parenthesis (parser, true, false,
						 /*consume_paren=*/true);

	if (keyword == RID_IF)
	  {
	    bool nested_if;
	    unsigned char in_statement;

	    /* Add the condition. */
	    finish_if_stmt_cond (condition, statement);

	    /* Parse the then-clause.  Record IN_IF_STMT so a dangling
	       `else' in the clause stops the statement sequence instead
	       of being diagnosed as an orphan.  */
	    in_statement = parser->in_statement;
	    parser->in_statement |= IN_IF_STMT;
	    if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	      {
		/* Empty then-clause: `if (c);'.  Warn unless an `else'
		   follows, which makes the intent explicit.  */
		location_t loc = cp_lexer_peek_token (parser->lexer)->location;
		add_stmt (build_empty_stmt (loc));
		cp_lexer_consume_token (parser->lexer);
		if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_ELSE))
		  warning_at (loc, OPT_Wempty_body, "suggest braces around "
			      "empty body in an %<if%> statement");
		nested_if = false;
	      }
	    else
	      cp_parser_implicitly_scoped_statement (parser, &nested_if);
	    parser->in_statement = in_statement;

	    finish_then_clause (statement);

	    /* If the next token is `else', parse the else-clause. */
	    if (cp_lexer_next_token_is_keyword (parser->lexer,
						RID_ELSE))
	      {
		/* Consume the `else' keyword. */
		cp_lexer_consume_token (parser->lexer);
		begin_else_clause (statement);
		/* Parse the else-clause. */
		if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
		  {
		    location_t loc;
		    loc = cp_lexer_peek_token (parser->lexer)->location;
		    warning_at (loc,
				OPT_Wempty_body, "suggest braces around "
				"empty body in an %<else%> statement");
		    add_stmt (build_empty_stmt (loc));
		    cp_lexer_consume_token (parser->lexer);
		  }
		else
		  cp_parser_implicitly_scoped_statement (parser, NULL);

		finish_else_clause (statement);

		/* If we are currently parsing a then-clause, then
		   IF_P will not be NULL.  We set it to true to
		   indicate that this if statement has an else clause.
		   This may trigger the Wparentheses warning below
		   when we get back up to the parent if statement.  */
		if (if_p != NULL)
		  *if_p = true;
	      }
	    else
	      {
		/* This if statement does not have an else clause.  If
		   NESTED_IF is true, then the then-clause is an if
		   statement which does have an else clause.  We warn
		   about the potential ambiguity.  */
		if (nested_if)
		  warning_at (EXPR_LOCATION (statement), OPT_Wparentheses,
			      "suggest explicit braces to avoid ambiguous"
			      " %<else%>");
	      }

	    /* Now we're all done with the if-statement. */
	    finish_if_stmt (statement);
	  }
	else
	  {
	    bool in_switch_statement_p;
	    unsigned char in_statement;

	    /* Add the condition. */
	    finish_switch_cond (condition, statement);

	    /* Parse the body of the switch-statement, with flags saved
	       and restored so case labels and `break' resolve to this
	       switch and not an outer one.  */
	    in_switch_statement_p = parser->in_switch_statement_p;
	    in_statement = parser->in_statement;
	    parser->in_switch_statement_p = true;
	    parser->in_statement |= IN_SWITCH_STMT;
	    cp_parser_implicitly_scoped_statement (parser, NULL);
	    parser->in_switch_statement_p = in_switch_statement_p;
	    parser->in_statement = in_statement;

	    /* Now we're all done with the switch-statement. */
	    finish_switch_stmt (statement);
	  }

	return statement;
      }
      break;

    default:
      cp_parser_error (parser, "expected selection-statement");
      return error_mark_node;
    }
}
/* Parse a condition.
condition:
expression
type-specifier-seq declarator = initializer-clause
type-specifier-seq declarator braced-init-list
GNU Extension:
condition:
type-specifier-seq declarator asm-specification [opt]
attributes [opt] = assignment-expression
Returns the expression that should be tested. */
static tree
cp_parser_condition (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifiers;
  const char *saved_message;
  int declares_class_or_enum;

  /* Try the declaration first. */
  cp_parser_parse_tentatively (parser);
  /* New types are not allowed in the type-specifier-seq for a
     condition. */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in conditions");
  /* Parse the type-specifier-seq. */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR,
				&type_specifiers,
				&declares_class_or_enum);
  /* Restore the saved message. */
  parser->type_definition_forbidden_message = saved_message;
  /* If all is well, we might be looking at a declaration. */
  if (!cp_parser_error_occurred (parser))
    {
      tree decl;
      tree asm_specification;
      tree attributes;
      cp_declarator *declarator;
      tree initializer = NULL_TREE;

      /* Parse the declarator. */
      declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					 /*ctor_dtor_or_conv_p=*/NULL,
					 /*parenthesized_p=*/NULL,
					 /*member_p=*/false);
      /* Parse the attributes. */
      attributes = cp_parser_attributes_opt (parser);
      /* Parse the asm-specification. */
      asm_specification = cp_parser_asm_specification_opt (parser);
      /* If the next token is not an `=' or '{', then we might still be
	 looking at an expression.  For example:

	   if (A(a).x)

	 looks like a decl-specifier-seq and a declarator -- but then
	 there is no `=', so this is an expression.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE))
	cp_parser_simulate_error (parser);

      /* If we did see an `=' or '{', then we are looking at a declaration
	 for sure. */
      if (cp_parser_parse_definitely (parser))
	{
	  tree pushed_scope;
	  bool non_constant_p;
	  bool flags = LOOKUP_ONLYCONVERTING;

	  /* Create the declaration. */
	  decl = start_decl (declarator, &type_specifiers,
			     /*initialized_p=*/true,
			     attributes, /*prefix_attributes=*/NULL_TREE,
			     &pushed_scope);

	  /* Parse the initializer: either a braced-init-list (which is
	     direct-initialization, so drop LOOKUP_ONLYCONVERTING) or an
	     `=' followed by an initializer-clause.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	    {
	      initializer = cp_parser_braced_list (parser, &non_constant_p);
	      CONSTRUCTOR_IS_DIRECT_INIT (initializer) = 1;
	      flags = 0;
	    }
	  else
	    {
	      /* Consume the `='. */
	      cp_parser_require (parser, CPP_EQ, RT_EQ);
	      initializer = cp_parser_initializer_clause (parser, &non_constant_p);
	    }
	  if (BRACE_ENCLOSED_INITIALIZER_P (initializer))
	    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);

	  /* Process the initializer. */
	  cp_finish_decl (decl,
			  initializer, !non_constant_p,
			  asm_specification,
			  flags);

	  if (pushed_scope)
	    pop_scope (pushed_scope);

	  /* The condition tests the declared variable's value.  */
	  return convert_from_reference (decl);
	}
    }
  /* If we didn't even get past the declarator successfully, we are
     definitely not looking at a declaration. */
  else
    cp_parser_abort_tentative_parse (parser);

  /* Otherwise, we are looking at an expression. */
  return cp_parser_expression (parser, /*cast_p=*/false, NULL);
}
/* Parses a for-statement or range-for-statement until the closing ')',
not included. */
/* Parse a for-statement or range-for-statement up to (but not
   including) the closing `)'.  Returns the statement tree.  */

static tree
cp_parser_for (cp_parser *parser)
{
  tree init, scope, decl;

  /* Open the scope that will hold any loop-local declarations.  */
  scope = begin_for_scope (&init);

  /* The for-init-statement decides which flavor of loop this is.  */
  if (cp_parser_for_init_statement (parser, &decl))
    return cp_parser_range_for (parser, scope, init, decl);

  return cp_parser_c_for (parser, scope, init);
}
/* Parse the remainder of a classic three-part for loop: the condition,
   the `;', and the increment expression.  SCOPE and INIT come from
   cp_parser_for; the init-statement itself has already been consumed.
   Returns the FOR_STMT.  */

static tree
cp_parser_c_for (cp_parser *parser, tree scope, tree init)
{
  tree cond = NULL_TREE;
  tree incr = NULL_TREE;
  tree stmt = begin_for_stmt (scope, init);

  /* The for-init-statement was parsed in cp_parser_for_init_statement,
     so just close that part of the loop.  */
  finish_for_init_stmt (stmt);

  /* An omitted condition stays NULL_TREE.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    cond = cp_parser_condition (parser);
  finish_for_cond (cond, stmt);

  /* The `;' separating condition and increment.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Likewise, an omitted increment expression stays NULL_TREE.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
    incr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
  finish_for_expr (incr, stmt);

  return stmt;
}
/* Tries to parse a range-based for-statement:
range-based-for:
decl-specifier-seq declarator : expression
The decl-specifier-seq declarator and the `:' are already parsed by
cp_parser_for_init_statement. If processing_template_decl it returns a
newly created RANGE_FOR_STMT; if not, it is converted to a
regular FOR_STMT. */
static tree
cp_parser_range_for (cp_parser *parser, tree scope, tree init, tree range_decl)
{
  tree stmt, range_expr;

  /* The range may be a braced-init-list as well as an expression.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      bool expr_non_constant_p;
      range_expr = cp_parser_braced_list (parser, &expr_non_constant_p);
    }
  else
    range_expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);

  /* If in template, STMT is converted to a normal for-statement
     at instantiation. If not, it is done just ahead. */
  if (processing_template_decl)
    {
      if (check_for_bare_parameter_packs (range_expr))
	range_expr = error_mark_node;
      stmt = begin_range_for_stmt (scope, init);
      finish_range_for_decl (stmt, range_decl, range_expr);
      /* Even inside a template, `auto' in the for-range-declaration can
	 be resolved now when the range's type is non-dependent and
	 complete.  */
      if (range_expr != error_mark_node
	  && !type_dependent_expression_p (range_expr)
	  /* The length of an array might be dependent. */
	  && COMPLETE_TYPE_P (TREE_TYPE (range_expr))
	  /* do_auto_deduction doesn't mess with template init-lists. */
	  && !BRACE_ENCLOSED_INITIALIZER_P (range_expr))
	do_range_for_auto_deduction (range_decl, range_expr);
    }
  else
    {
      /* Not in a template: lower the range-for to a plain FOR_STMT
	 immediately.  */
      stmt = begin_for_stmt (scope, init);
      stmt = cp_convert_range_for (stmt, range_decl, range_expr);
    }
  return stmt;
}
/* Subroutine of cp_convert_range_for: given the initializer expression,
builds up the range temporary. */
/* Subroutine of cp_convert_range_for: build (but do not push) the
   artificial `__for_range' variable whose type is deduced as if by
   `auto &&__for_range = RANGE_EXPR;'.  Returns the VAR_DECL.  */

static tree
build_range_temp (tree range_expr)
{
  tree deduced_type, temp;

  /* Deduce the type for `auto &&' from the range expression.  */
  deduced_type = cp_build_reference_type (make_auto (), true);
  deduced_type = do_auto_deduction (deduced_type, range_expr,
				    type_uses_auto (deduced_type));

  /* Build the compiler-generated __for_range variable itself.  */
  temp = build_decl (input_location, VAR_DECL,
		     get_identifier ("__for_range"), deduced_type);
  DECL_ARTIFICIAL (temp) = 1;
  TREE_USED (temp) = 1;

  return temp;
}
/* Used by cp_parser_range_for in template context: we aren't going to
do a full conversion yet, but we still need to resolve auto in the
type of the for-range-declaration if present. This is basically
a shortcut version of cp_convert_range_for. */
/* Used by cp_parser_range_for in template context: we aren't going to
   do a full conversion yet, but we still resolve `auto' in the type of
   the for-range-declaration DECL (if present) from RANGE_EXPR, by
   deducing against the type of `*__begin'.  */

static void
do_range_for_auto_deduction (tree decl, tree range_expr)
{
  tree auto_node = type_uses_auto (TREE_TYPE (decl));

  /* Nothing to do unless the declaration's type involves `auto'.  */
  if (!auto_node)
    return;

  {
    tree begin_dummy, end_dummy, range_temp, iter_type, iter_decl;

    /* Build a throw-away __range and find the iterator type.  */
    range_temp = convert_from_reference (build_range_temp (range_expr));
    iter_type = cp_parser_perform_range_for_lookup (range_temp,
						    &begin_dummy, &end_dummy);

    /* Deduce DECL's type from the type of `*__begin'.  */
    iter_decl = build_decl (input_location, VAR_DECL, NULL_TREE, iter_type);
    iter_decl = build_x_indirect_ref (iter_decl, RO_NULL,
				      tf_warning_or_error);
    TREE_TYPE (decl) = do_auto_deduction (TREE_TYPE (decl),
					  iter_decl, auto_node);
  }
}
/* Converts a range-based for-statement into a normal
for-statement, as per the definition.
for (RANGE_DECL : RANGE_EXPR)
BLOCK
should be equivalent to:
{
auto &&__range = RANGE_EXPR;
for (auto __begin = BEGIN_EXPR, __end = END_EXPR;
__begin != __end;
++__begin)
{
RANGE_DECL = *__begin;
BLOCK
}
}
If RANGE_EXPR is an array:
BEGIN_EXPR = __range
END_EXPR = __range + ARRAY_SIZE(__range)
Else if RANGE_EXPR has a member 'begin' or 'end':
BEGIN_EXPR = __range.begin()
END_EXPR = __range.end()
Else:
BEGIN_EXPR = begin(__range)
END_EXPR = end(__range);
If __range has a member 'begin' but not 'end', or vice versa, we must
still use the second alternative (it will surely fail, however).
When calling begin()/end() in the third alternative we must use
argument dependent lookup, but always considering 'std' as an associated
namespace. */
/* Subroutine of cp_convert_range_for: build the artificial iteration
   variable NAME (e.g. "__for_begin") of type TYPE, push it into the
   current scope, and initialize it from INIT.  Returns the VAR_DECL.  */

static tree
cp_range_for_build_iter_var (const char *name, tree type, tree init)
{
  tree decl = build_decl (input_location, VAR_DECL,
			  get_identifier (name), type);
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  pushdecl (decl);
  cp_finish_decl (decl, init,
		  /*is_constant_init*/false, NULL_TREE,
		  LOOKUP_ONLYCONVERTING);
  return decl;
}

/* Perform the range-for lowering described in the comment above:
   STATEMENT is the FOR_STMT under construction, RANGE_DECL the
   for-range-declaration, RANGE_EXPR the range.  Returns STATEMENT.  */

tree
cp_convert_range_for (tree statement, tree range_decl, tree range_expr)
{
  tree begin, end;
  tree iter_type, begin_expr, end_expr;
  tree condition, expression;

  if (range_decl == error_mark_node || range_expr == error_mark_node)
    /* If an error happened previously do nothing or else a lot of
       unhelpful errors would be issued.  */
    begin_expr = end_expr = iter_type = error_mark_node;
  else
    {
      /* Build and initialize `auto &&__range = RANGE_EXPR'.  */
      tree range_temp = build_range_temp (range_expr);
      pushdecl (range_temp);
      cp_finish_decl (range_temp, range_expr,
		      /*is_constant_init*/false, NULL_TREE,
		      LOOKUP_ONLYCONVERTING);
      range_temp = convert_from_reference (range_temp);
      /* Resolve BEGIN_EXPR/END_EXPR and the iterator type per the
	 array / member / ADL rules documented above.  */
      iter_type = cp_parser_perform_range_for_lookup (range_temp,
						      &begin_expr, &end_expr);
    }

  /* The new for initialization statement: the __for_begin and
     __for_end iterator variables.  */
  begin = cp_range_for_build_iter_var ("__for_begin", iter_type, begin_expr);
  end = cp_range_for_build_iter_var ("__for_end", iter_type, end_expr);
  finish_for_init_stmt (statement);

  /* The new for condition: __for_begin != __for_end.  */
  condition = build_x_binary_op (NE_EXPR,
				 begin, ERROR_MARK,
				 end, ERROR_MARK,
				 NULL, tf_warning_or_error);
  finish_for_cond (condition, statement);

  /* The new increment expression: ++__for_begin.  */
  expression = finish_unary_op_expr (PREINCREMENT_EXPR, begin);
  finish_for_expr (expression, statement);

  /* The declaration is initialized with *__begin inside the loop body.  */
  cp_finish_decl (range_decl,
		  build_x_indirect_ref (begin, RO_NULL, tf_warning_or_error),
		  /*is_constant_init*/false, NULL_TREE,
		  LOOKUP_ONLYCONVERTING);

  return statement;
}
/* Solves BEGIN_EXPR and END_EXPR as described in cp_convert_range_for.
   We need to solve both at the same time because the method used
   depends on the existence of members begin or end.
   Returns the type deduced for the iterator expression.  */

static tree
cp_parser_perform_range_for_lookup (tree range, tree *begin, tree *end)
{
  if (error_operand_p (range))
    {
      /* An error was already reported for RANGE; stay quiet.  */
      *begin = *end = error_mark_node;
      return error_mark_node;
    }

  if (!COMPLETE_TYPE_P (complete_type (TREE_TYPE (range))))
    {
      /* begin/end cannot be looked up (nor pointer arithmetic done)
	 on an incomplete type.  */
      error ("range-based %<for%> expression of type %qT "
	     "has incomplete type", TREE_TYPE (range));
      *begin = *end = error_mark_node;
      return error_mark_node;
    }
  if (TREE_CODE (TREE_TYPE (range)) == ARRAY_TYPE)
    {
      /* If RANGE is an array, we will use pointer arithmetic.  */
      *begin = range;
      /* __end is the array decayed to a pointer, plus the number of
	 elements in the array.  */
      *end = build_binary_op (input_location, PLUS_EXPR,
			      range,
			      array_type_nelts_top (TREE_TYPE (range)),
			      0);
      /* The deduced iterator type is pointer-to-element.  */
      return build_pointer_type (TREE_TYPE (TREE_TYPE (range)));
    }
  else
    {
      /* If it is not an array, we must do a bit of magic.  */
      tree id_begin, id_end;
      tree member_begin, member_end;

      *begin = *end = error_mark_node;

      id_begin = get_identifier ("begin");
      id_end = get_identifier ("end");
      member_begin = lookup_member (TREE_TYPE (range), id_begin,
				    /*protect=*/2, /*want_type=*/false,
				    tf_warning_or_error);
      member_end = lookup_member (TREE_TYPE (range), id_end,
				  /*protect=*/2, /*want_type=*/false,
				  tf_warning_or_error);

      if (member_begin != NULL_TREE || member_end != NULL_TREE)
	{
	  /* Use the member functions.  If either one is present, both
	     must be; complain about whichever one is missing.  */
	  if (member_begin != NULL_TREE)
	    *begin = cp_parser_range_for_member_function (range, id_begin);
	  else
	    error ("range-based %<for%> expression of type %qT has an "
		   "%<end%> member but not a %<begin%>", TREE_TYPE (range));

	  if (member_end != NULL_TREE)
	    *end = cp_parser_range_for_member_function (range, id_end);
	  else
	    error ("range-based %<for%> expression of type %qT has a "
		   "%<begin%> member but not an %<end%>", TREE_TYPE (range));
	}
      else
	{
	  /* Use global functions with ADL; /*include_std=*/true also
	     makes namespace std an associated namespace.  */
	  VEC(tree,gc) *vec;
	  vec = make_tree_vector ();

	  VEC_safe_push (tree, gc, vec, range);

	  member_begin = perform_koenig_lookup (id_begin, vec,
						/*include_std=*/true,
						tf_warning_or_error);
	  *begin = finish_call_expr (member_begin, &vec, false, true,
				     tf_warning_or_error);
	  member_end = perform_koenig_lookup (id_end, vec,
					      /*include_std=*/true,
					      tf_warning_or_error);
	  *end = finish_call_expr (member_end, &vec, false, true,
				   tf_warning_or_error);

	  release_tree_vector (vec);
	}

      /* Last common checks.  */
      if (*begin == error_mark_node || *end == error_mark_node)
	{
	  /* If one of the expressions is an error do no more checks.  */
	  *begin = *end = error_mark_node;
	  return error_mark_node;
	}
      else
	{
	  tree iter_type = cv_unqualified (TREE_TYPE (*begin));
	  /* The unqualified type of the __begin and __end temporaries should
	     be the same, as required by the multiple auto declaration.  */
	  if (!same_type_p (iter_type, cv_unqualified (TREE_TYPE (*end))))
	    error ("inconsistent begin/end types in range-based %<for%> "
		   "statement: %qT and %qT",
		   TREE_TYPE (*begin), TREE_TYPE (*end));
	  return iter_type;
	}
    }
}
/* Helper function for cp_parser_perform_range_for_lookup.
   Builds a call tree for RANGE.IDENTIFIER (), i.e. the member
   begin()/end() call of a range-based for.  Returns the call
   expression, or error_mark_node if the member access failed.  */

static tree
cp_parser_range_for_member_function (tree range, tree identifier)
{
  tree fn;
  tree call;
  VEC(tree,gc) *args;

  /* Build the member access RANGE.IDENTIFIER first; if that already
     failed there is no point in building the call.  */
  fn = finish_class_member_access_expr (range, identifier,
					false, tf_warning_or_error);
  if (fn == error_mark_node)
    return error_mark_node;

  /* Call it with an empty argument list.  */
  args = make_tree_vector ();
  call = finish_call_expr (fn, &args,
			   /*disallow_virtual=*/false,
			   /*koenig_p=*/false,
			   tf_warning_or_error);
  release_tree_vector (args);
  return call;
}
/* Parse an iteration-statement.

   iteration-statement:
     while ( condition ) statement
     do statement while ( expression ) ;
     for ( for-init-statement condition [opt] ; expression [opt] )
       statement

   Returns the new WHILE_STMT, DO_STMT, FOR_STMT or RANGE_FOR_STMT.  */

static tree
cp_parser_iteration_statement (cp_parser* parser)
{
  cp_token *token;
  enum rid keyword;
  tree statement;
  unsigned char in_statement;

  /* Peek at the next token.  NOTE(review): RT_INTERATION spelling
     presumably matches the required-token enum defined elsewhere in
     this file; do not "correct" it locally.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_INTERATION);
  if (!token)
    return error_mark_node;

  /* Remember whether or not we are already within an iteration
     statement, so it can be restored after the loop body (the body is
     parsed with in_statement set to IN_ITERATION_STMT so that
     break/continue are accepted).  */
  in_statement = parser->in_statement;

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_WHILE:
      {
	tree condition;

	/* Begin the while-statement.  */
	statement = begin_while_stmt ();
	/* Look for the `('.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	/* Parse the condition.  */
	condition = cp_parser_condition (parser);
	finish_while_stmt_cond (condition, statement);
	/* Look for the `)'.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	/* Parse the dependent statement.  */
	parser->in_statement = IN_ITERATION_STMT;
	cp_parser_already_scoped_statement (parser);
	parser->in_statement = in_statement;
	/* We're done with the while-statement.  */
	finish_while_stmt (statement);
      }
      break;

    case RID_DO:
      {
	tree expression;

	/* Begin the do-statement.  */
	statement = begin_do_stmt ();
	/* Parse the body of the do-statement.  */
	parser->in_statement = IN_ITERATION_STMT;
	cp_parser_implicitly_scoped_statement (parser, NULL);
	parser->in_statement = in_statement;
	finish_do_body (statement);
	/* Look for the `while' keyword.  */
	cp_parser_require_keyword (parser, RID_WHILE, RT_WHILE);
	/* Look for the `('.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	/* Parse the expression.  */
	expression = cp_parser_expression (parser, /*cast_p=*/false, NULL);
	/* We're done with the do-statement.  */
	finish_do_stmt (expression, statement);
	/* Look for the `)'.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	/* Look for the `;'.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      }
      break;

    case RID_FOR:
      {
	/* Look for the `('.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

	/* cp_parser_for handles both the classic and the range-based
	   form; it returns the statement to finish below.  */
	statement = cp_parser_for (parser);

	/* Look for the `)'.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

	/* Parse the body of the for-statement.  */
	parser->in_statement = IN_ITERATION_STMT;
	cp_parser_already_scoped_statement (parser);
	parser->in_statement = in_statement;

	/* We're done with the for-statement.  */
	finish_for_stmt (statement);
      }
      break;

    default:
      cp_parser_error (parser, "expected iteration-statement");
      statement = error_mark_node;
      break;
    }

  return statement;
}
/* Parse a for-init-statement or the declarator of a range-based-for.
   Returns true if a range-based-for declaration is seen.

   for-init-statement:
     expression-statement
     simple-declaration  */

static bool
cp_parser_for_init_statement (cp_parser* parser, tree *decl)
{
  /* If the next token is a `;', then we have an empty
     expression-statement.  Grammatically, this is also a
     simple-declaration, but an invalid one, because it does not
     declare anything.  Therefore, if we did not handle this case
     specially, we would issue an error message about an invalid
     declaration.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      bool is_range_for = false;
      bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

      /* Don't let a stray `:' be "corrected" to `::' here: in this
	 position a colon legitimately introduces a range-based for.  */
      parser->colon_corrects_to_scope_p = false;

      /* We're going to speculatively look for a declaration, falling back
	 to an expression, if necessary.  */
      cp_parser_parse_tentatively (parser);
      /* Parse the declaration.  */
      cp_parser_simple_declaration (parser,
				    /*function_definition_allowed_p=*/false,
				    decl);
      parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* It is a range-for, consume the ':' */
	  cp_lexer_consume_token (parser->lexer);
	  is_range_for = true;
	  if (cxx_dialect < cxx0x)
	    {
	      /* Diagnose but keep parsing; *DECL is poisoned so callers
		 don't build a bogus range-for from it.  */
	      error_at (cp_lexer_peek_token (parser->lexer)->location,
			"range-based %<for%> loops are not allowed "
			"in C++98 mode");
	      *decl = error_mark_node;
	    }
	}
      else
	/* The ';' is not consumed yet because we told
	   cp_parser_simple_declaration not to.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      if (cp_parser_parse_definitely (parser))
	return is_range_for;
      /* If the tentative parse failed, then we shall need to look for an
	 expression-statement.  */
    }
  /* If we are here, it is an expression-statement.  */
  cp_parser_expression_statement (parser, NULL_TREE);
  return false;
}
/* Parse a jump-statement.

   jump-statement:
     break ;
     continue ;
     return expression [opt] ;
     return braced-init-list ;
     goto identifier ;

   GNU extension:

   jump-statement:
     goto * expression ;

   Returns the new BREAK_STMT, CONTINUE_STMT, RETURN_EXPR, or GOTO_EXPR.  */

static tree
cp_parser_jump_statement (cp_parser* parser)
{
  tree statement = error_mark_node;
  cp_token *token;
  enum rid keyword;
  unsigned char in_statement;

  /* Peek at the next token.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_JUMP);
  if (!token)
    return error_mark_node;

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_BREAK:
      /* A break binds to the innermost enclosing loop or switch, so
	 the if-statement bit is irrelevant here and is masked off.  */
      in_statement = parser->in_statement & ~IN_IF_STMT;
      switch (in_statement)
	{
	case 0:
	  error_at (token->location, "break statement not within loop or switch");
	  break;
	default:
	  /* Any other combination must involve a switch or be exactly
	     an iteration statement; both allow break.  (The default
	     label is placed before the OMP cases deliberately: the OMP
	     values are checked exactly, everything else lands here.)  */
	  gcc_assert ((in_statement & IN_SWITCH_STMT)
		      || in_statement == IN_ITERATION_STMT);
	  statement = finish_break_stmt ();
	  break;
	case IN_OMP_BLOCK:
	  error_at (token->location, "invalid exit from OpenMP structured block");
	  break;
	case IN_OMP_FOR:
	  error_at (token->location, "break statement used with OpenMP for loop");
	  break;
	}
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    case RID_CONTINUE:
      /* continue is valid inside a loop even when a switch or if
	 intervenes, so mask both of those bits off.  */
      switch (parser->in_statement & ~(IN_SWITCH_STMT | IN_IF_STMT))
	{
	case 0:
	  error_at (token->location, "continue statement not within a loop");
	  break;
	case IN_ITERATION_STMT:
	case IN_OMP_FOR:
	  statement = finish_continue_stmt ();
	  break;
	case IN_OMP_BLOCK:
	  error_at (token->location, "invalid exit from OpenMP structured block");
	  break;
	default:
	  gcc_unreachable ();
	}
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    case RID_RETURN:
      {
	tree expr;
	bool expr_non_constant_p;

	if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	  {
	    /* return { ... } ; -- a C++0x braced-init-list.  */
	    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
	    expr = cp_parser_braced_list (parser, &expr_non_constant_p);
	  }
	else if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	  expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
	else
	  /* If the next token is a `;', then there is no
	     expression.  */
	  expr = NULL_TREE;
	/* Build the return-statement.  */
	statement = finish_return_stmt (expr);
	/* Look for the final `;'.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      }
      break;

    case RID_GOTO:
      /* Create the goto-statement.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_MULT))
	{
	  /* Issue a warning about this use of a GNU extension.  */
	  pedwarn (token->location, OPT_pedantic, "ISO C++ forbids computed gotos");
	  /* Consume the '*' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the dependent expression.  */
	  finish_goto_stmt (cp_parser_expression (parser, /*cast_p=*/false, NULL));
	}
      else
	finish_goto_stmt (cp_parser_identifier (parser));
      /* Look for the final `;'.  */
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    default:
      cp_parser_error (parser, "expected jump-statement");
      break;
    }

  return statement;
}
/* Parse a declaration-statement.

   declaration-statement:
     block-declaration  */

static void
cp_parser_declaration_statement (cp_parser* parser)
{
  void *obstack_mark;

  /* Record the current top of the declarator obstack, so that every
     declarator built while parsing this statement can be released in
     one shot afterwards.  */
  obstack_mark = obstack_alloc (&declarator_obstack, 0);

  /* A declaration-statement is simply a block-declaration appearing
     in statement context.  */
  cp_parser_block_declaration (parser, /*statement_p=*/true);

  /* Reclaim every declarator allocated above the recorded mark.  */
  obstack_free (&declarator_obstack, obstack_mark);

  /* Finish off the statement.  */
  finish_stmt ();
}
/* Some dependent statements (like `if (cond) statement'), are
   implicitly in their own scope.  In other words, if the statement is
   a single statement (as opposed to a compound-statement), it is
   none-the-less treated as if it were enclosed in braces.  Any
   declarations appearing in the dependent statement are out of scope
   after control passes that point.  This function parses a statement,
   but ensures that is in its own scope, even if it is not a
   compound-statement.

   If IF_P is not NULL, *IF_P is set to indicate whether the statement
   is a (possibly labeled) if statement which is not enclosed in
   braces and has an else clause.  This is used to implement
   -Wparentheses.

   Returns the new statement.  */

static tree
cp_parser_implicitly_scoped_statement (cp_parser* parser, bool *if_p)
{
  tree statement;

  if (if_p != NULL)
    *if_p = false;

  /* Mark if () ; with a special NOP_EXPR.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    {
      location_t loc = cp_lexer_peek_token (parser->lexer)->location;
      cp_lexer_consume_token (parser->lexer);
      /* An empty statement body still needs a statement node, so that
	 e.g. warnings can point at it.  */
      statement = add_stmt (build_empty_stmt (loc));
    }
  /* if a compound is opened, we simply parse the statement directly.  */
  else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    statement = cp_parser_compound_statement (parser, NULL, false, false);
  /* If the token is not a `{', then we must take special action.  */
  else
    {
      /* Create a compound-statement.  This gives the single dependent
	 statement its own scope.  */
      statement = begin_compound_stmt (0);
      /* Parse the dependent-statement.  */
      cp_parser_statement (parser, NULL_TREE, false, if_p);
      /* Finish the dummy compound-statement.  */
      finish_compound_stmt (statement);
    }

  /* Return the statement.  */
  return statement;
}
/* For some dependent statements (like `while (cond) statement'), we
   have already created a scope.  Therefore, even if the dependent
   statement is a compound-statement, we do not want to create another
   scope.  */

static void
cp_parser_already_scoped_statement (cp_parser* parser)
{
  /* A `{' needs special treatment: parse its contents by hand so that
     no new scope is opened.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* Avoid calling cp_parser_compound_statement, so that we
	 don't create a new scope.  Do everything else by hand.  */
      cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE);
      /* If the next keyword is `__label__' we have a label declaration.  */
      while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
	cp_parser_label_declaration (parser);
      /* Parse an (optional) statement-seq.  */
      cp_parser_statement_seq_opt (parser, NULL_TREE);
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
  else
    /* Any other statement parses normally; the caller's scope is in
       effect.  */
    cp_parser_statement (parser, NULL_TREE, false, NULL);
}
/* Declarations [gram.dcl.dcl] */

/* Parse an optional declaration-sequence.

   declaration-seq:
     declaration
     declaration-seq declaration  */

static void
cp_parser_declaration_seq_opt (cp_parser* parser)
{
  while (true)
    {
      cp_token *token;

      token = cp_lexer_peek_token (parser->lexer);

      /* Stop at the end of the enclosing construct (or file).  */
      if (token->type == CPP_CLOSE_BRACE
	  || token->type == CPP_EOF
	  || token->type == CPP_PRAGMA_EOL)
	break;

      if (token->type == CPP_SEMICOLON)
	{
	  /* A declaration consisting of a single semicolon is
	     invalid.  Allow it unless we're being pedantic.  */
	  cp_lexer_consume_token (parser->lexer);
	  if (!in_system_header)
	    pedwarn (input_location, OPT_pedantic, "extra %<;%>");
	  continue;
	}

      /* If we're entering or exiting a region that's implicitly
	 extern "C", modify the lang context appropriately.  The lexer
	 records on each token whether it came from such a region.  */
      if (!parser->implicit_extern_c && token->implicit_extern_c)
	{
	  push_lang_context (lang_name_c);
	  parser->implicit_extern_c = true;
	}
      else if (parser->implicit_extern_c && !token->implicit_extern_c)
	{
	  pop_lang_context ();
	  parser->implicit_extern_c = false;
	}

      if (token->type == CPP_PRAGMA)
	{
	  /* A top-level declaration can consist solely of a #pragma.
	     A nested declaration cannot, so this is done here and not
	     in cp_parser_declaration.  (A #pragma at block scope is
	     handled in cp_parser_statement.)  */
	  cp_parser_pragma (parser, pragma_external);
	  continue;
	}

      /* Parse the declaration itself.  */
      cp_parser_declaration (parser);
    }
}
/* Parse a declaration.

   declaration:
     block-declaration
     function-definition
     template-declaration
     explicit-instantiation
     explicit-specialization
     linkage-specification
     namespace-definition

   GNU extension:

   declaration:
      __extension__ declaration */

static void
cp_parser_declaration (cp_parser* parser)
{
  cp_token token1;
  cp_token token2;
  int saved_pedantic;
  void *p;
  tree attributes = NULL_TREE;

  /* Check for the `__extension__' keyword.  If found, recurse on the
     rest with pedantic warnings suppressed.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Parse the qualified declaration.  */
      cp_parser_declaration (parser);
      /* Restore the PEDANTIC flag.  */
      pedantic = saved_pedantic;

      return;
    }

  /* Try to figure out what kind of declaration is present by peeking
     at the first two tokens.  */
  token1 = *cp_lexer_peek_token (parser->lexer);

  if (token1.type != CPP_EOF)
    token2 = *cp_lexer_peek_nth_token (parser->lexer, 2);
  else
    {
      /* Synthesize a harmless second token rather than peeking past
	 the end of input.  */
      token2.type = CPP_EOF;
      token2.keyword = RID_MAX;
    }

  /* Get the high-water mark for the DECLARATOR_OBSTACK.  */
  p = obstack_alloc (&declarator_obstack, 0);

  /* If the next token is `extern' and the following token is a string
     literal, then we have a linkage specification.  */
  if (token1.keyword == RID_EXTERN
      && cp_parser_is_pure_string_literal (&token2))
    cp_parser_linkage_specification (parser);
  /* If the next token is `template', then we have either a template
     declaration, an explicit instantiation, or an explicit
     specialization.  */
  else if (token1.keyword == RID_TEMPLATE)
    {
      /* `template <>' indicates a template specialization.  */
      if (token2.type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
	cp_parser_explicit_specialization (parser);
      /* `template <' indicates a template declaration.  */
      else if (token2.type == CPP_LESS)
	cp_parser_template_declaration (parser, /*member_p=*/false);
      /* Anything else must be an explicit instantiation.  */
      else
	cp_parser_explicit_instantiation (parser);
    }
  /* If the next token is `export', then we have a template
     declaration.  */
  else if (token1.keyword == RID_EXPORT)
    cp_parser_template_declaration (parser, /*member_p=*/false);
  /* If the next token is `extern', 'static' or 'inline' and the one
     after that is `template', we have a GNU extended explicit
     instantiation directive.  */
  else if (cp_parser_allow_gnu_extensions_p (parser)
	   && (token1.keyword == RID_EXTERN
	       || token1.keyword == RID_STATIC
	       || token1.keyword == RID_INLINE)
	   && token2.keyword == RID_TEMPLATE)
    cp_parser_explicit_instantiation (parser);
  /* If the next token is `namespace', check for a named or unnamed
     namespace definition.  */
  else if (token1.keyword == RID_NAMESPACE
	   && (/* A named namespace definition.  */
	       (token2.type == CPP_NAME
		&& (cp_lexer_peek_nth_token (parser->lexer, 3)->type
		    != CPP_EQ))
	       /* An unnamed namespace definition.  */
	       || token2.type == CPP_OPEN_BRACE
	       || token2.keyword == RID_ATTRIBUTE))
    cp_parser_namespace_definition (parser);
  /* An inline (associated) namespace definition.  */
  else if (token1.keyword == RID_INLINE
	   && token2.keyword == RID_NAMESPACE)
    cp_parser_namespace_definition (parser);
  /* Objective-C++ declaration/definition.  */
  else if (c_dialect_objc () && OBJC_IS_AT_KEYWORD (token1.keyword))
    cp_parser_objc_declaration (parser, NULL_TREE);
  else if (c_dialect_objc ()
	   && token1.keyword == RID_ATTRIBUTE
	   && cp_parser_objc_valid_prefix_attributes (parser, &attributes))
    cp_parser_objc_declaration (parser, attributes);
  /* We must have either a block declaration or a function
     definition.  */
  else
    /* Try to parse a block-declaration, or a function-definition.  */
    cp_parser_block_declaration (parser, /*statement_p=*/false);

  /* Free any declarators allocated.  */
  obstack_free (&declarator_obstack, p);
}
/* Parse a block-declaration.

   block-declaration:
     simple-declaration
     asm-definition
     namespace-alias-definition
     using-declaration
     using-directive

   GNU Extension:

   block-declaration:
     __extension__ block-declaration

   C++0x Extension:

   block-declaration:
     static_assert-declaration

   If STATEMENT_P is TRUE, then this block-declaration is occurring as
   part of a declaration-statement.  */

static void
cp_parser_block_declaration (cp_parser *parser,
			     bool      statement_p)
{
  cp_token *token1;
  int saved_pedantic;

  /* Check for the `__extension__' keyword.  If found, recurse with
     pedantic warnings suppressed.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Parse the qualified declaration.  */
      cp_parser_block_declaration (parser, statement_p);
      /* Restore the PEDANTIC flag.  */
      pedantic = saved_pedantic;

      return;
    }

  /* Peek at the next token to figure out which kind of declaration is
     present.  */
  token1 = cp_lexer_peek_token (parser->lexer);

  /* If the next keyword is `asm', we have an asm-definition.  */
  if (token1->keyword == RID_ASM)
    {
      /* In statement context the keyword makes the parse unambiguous,
	 so commit any tentative parse now.  */
      if (statement_p)
	cp_parser_commit_to_tentative_parse (parser);
      cp_parser_asm_definition (parser);
    }
  /* If the next keyword is `namespace', we have a
     namespace-alias-definition.  */
  else if (token1->keyword == RID_NAMESPACE)
    cp_parser_namespace_alias_definition (parser);
  /* If the next keyword is `using', we have a
     using-declaration, a using-directive, or an alias-declaration.  */
  else if (token1->keyword == RID_USING)
    {
      cp_token *token2;

      if (statement_p)
	cp_parser_commit_to_tentative_parse (parser);
      /* If the token after `using' is `namespace', then we have a
	 using-directive.  */
      token2 = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token2->keyword == RID_NAMESPACE)
	cp_parser_using_directive (parser);
      /* If the second token after 'using' is '=', then we have an
	 alias-declaration.  */
      else if (cxx_dialect >= cxx0x
	       && token2->type == CPP_NAME
	       && ((cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_EQ)
		   || (cp_lexer_peek_nth_token (parser->lexer, 3)->keyword
		       == RID_ATTRIBUTE)))
	cp_parser_alias_declaration (parser);
      /* Otherwise, it's a using-declaration.  */
      else
	cp_parser_using_declaration (parser,
				     /*access_declaration_p=*/false);
    }
  /* If the next keyword is `__label__' we have a misplaced label
     declaration.  */
  else if (token1->keyword == RID_LABEL)
    {
      cp_lexer_consume_token (parser->lexer);
      error_at (token1->location, "%<__label__%> not at the beginning of a block");
      cp_parser_skip_to_end_of_statement (parser);
      /* If the next token is now a `;', consume it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	cp_lexer_consume_token (parser->lexer);
    }
  /* If the next token is `static_assert' we have a static assertion.  */
  else if (token1->keyword == RID_STATIC_ASSERT)
    cp_parser_static_assert (parser, /*member_p=*/false);
  /* Anything else must be a simple-declaration.  */
  else
    /* Function definitions are only allowed outside statement
       context, hence !statement_p.  */
    cp_parser_simple_declaration (parser, !statement_p,
				  /*maybe_range_for_decl*/NULL);
}
/* Parse a simple-declaration.

   simple-declaration:
     decl-specifier-seq [opt] init-declarator-list [opt] ;

   init-declarator-list:
     init-declarator
     init-declarator-list , init-declarator

   If FUNCTION_DEFINITION_ALLOWED_P is TRUE, then we also recognize a
   function-definition as a simple-declaration.

   If MAYBE_RANGE_FOR_DECL is not NULL, the pointed tree will be set to the
   parsed declaration if it is an uninitialized single declarator not followed
   by a `;', or to error_mark_node otherwise.  Either way, the trailing `;',
   if present, will not be consumed.  */

static void
cp_parser_simple_declaration (cp_parser* parser,
			      bool function_definition_allowed_p,
			      tree *maybe_range_for_decl)
{
  cp_decl_specifier_seq decl_specifiers;
  int declares_class_or_enum;
  bool saw_declarator;

  if (maybe_range_for_decl)
    *maybe_range_for_decl = NULL_TREE;

  /* Defer access checks until we know what is being declared; the
     checks for names appearing in the decl-specifier-seq should be
     done as if we were in the scope of the thing being declared.  */
  push_deferring_access_checks (dk_deferred);

  /* Parse the decl-specifier-seq.  We have to keep track of whether
     or not the decl-specifier-seq declares a named class or
     enumeration type, since that is the only case in which the
     init-declarator-list is allowed to be empty.

     [dcl.dcl]

     In a simple-declaration, the optional init-declarator-list can be
     omitted only when declaring a class or enumeration, that is when
     the decl-specifier-seq contains either a class-specifier, an
     elaborated-type-specifier, or an enum-specifier.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  /* We no longer need to defer access checks.  */
  stop_deferring_access_checks ();

  /* In a block scope, a valid declaration must always have a
     decl-specifier-seq.  By not trying to parse declarators, we can
     resolve the declaration/expression ambiguity more quickly.  */
  if (!function_definition_allowed_p
      && !decl_specifiers.any_specifiers_p)
    {
      cp_parser_error (parser, "expected declaration");
      goto done;
    }

  /* If the next two tokens are both identifiers, the code is
     erroneous. The usual cause of this situation is code like:

       T t;

     where "T" should name a type -- but does not.  */
  if (!decl_specifiers.any_type_specifiers_p
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    {
      /* If parsing tentatively, we should commit; we really are
	 looking at a declaration.  */
      cp_parser_commit_to_tentative_parse (parser);
      /* Give up.  */
      goto done;
    }

  /* If we have seen at least one decl-specifier, and the next token
     is not a parenthesis, then we must be looking at a declaration.
     (After "int (" we might be looking at a functional cast.)  */
  if (decl_specifiers.any_specifiers_p
      && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN)
      && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
      && !cp_parser_error_occurred (parser))
    cp_parser_commit_to_tentative_parse (parser);

  /* Keep going until we hit the `;' at the end of the simple
     declaration.  */
  saw_declarator = false;
  while (cp_lexer_next_token_is_not (parser->lexer,
				     CPP_SEMICOLON))
    {
      cp_token *token;
      bool function_definition_p;
      tree decl;

      if (saw_declarator)
	{
	  /* If we are processing next declarator, comma is expected */
	  token = cp_lexer_peek_token (parser->lexer);
	  gcc_assert (token->type == CPP_COMMA);
	  cp_lexer_consume_token (parser->lexer);
	  /* More than one declarator means this cannot be the
	     declaration of a range-based for.  */
	  if (maybe_range_for_decl)
	    *maybe_range_for_decl = error_mark_node;
	}
      else
	saw_declarator = true;

      /* Parse the init-declarator.  */
      decl = cp_parser_init_declarator (parser, &decl_specifiers,
					/*checks=*/NULL,
					function_definition_allowed_p,
					/*member_p=*/false,
					declares_class_or_enum,
					&function_definition_p,
					maybe_range_for_decl);
      /* If an error occurred while parsing tentatively, exit quickly.
	 (That usually happens when in the body of a function; each
	 statement is treated as a declaration-statement until proven
	 otherwise.)  */
      if (cp_parser_error_occurred (parser))
	goto done;
      /* Handle function definitions specially.  */
      if (function_definition_p)
	{
	  /* If the next token is a `,', then we are probably
	     processing something like:

	       void f() {}, *p;

	     which is erroneous.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    {
	      cp_token *token = cp_lexer_peek_token (parser->lexer);
	      error_at (token->location,
			"mixing"
			" declarations and function-definitions is forbidden");
	    }
	  /* Otherwise, we're done with the list of declarators.  */
	  else
	    {
	      pop_deferring_access_checks ();
	      return;
	    }
	}
      if (maybe_range_for_decl && *maybe_range_for_decl == NULL_TREE)
	*maybe_range_for_decl = decl;
      /* The next token should be either a `,' or a `;'.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's a `,', there are more declarators to come.  */
      if (token->type == CPP_COMMA)
	/* will be consumed next time around */;
      /* If it's a `;', we are done.  (For a possible range-for
	 declaration, stop here without consuming the token; the
	 caller decides what follows.)  */
      else if (token->type == CPP_SEMICOLON || maybe_range_for_decl)
	break;
      /* Anything else is an error.  */
      else
	{
	  /* If we have already issued an error message we don't need
	     to issue another one.  */
	  if (decl != error_mark_node
	      || cp_parser_uncommitted_to_tentative_parse_p (parser))
	    cp_parser_error (parser, "expected %<,%> or %<;%>");
	  /* Skip tokens until we reach the end of the statement.  */
	  cp_parser_skip_to_end_of_statement (parser);
	  /* If the next token is now a `;', consume it.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    cp_lexer_consume_token (parser->lexer);
	  goto done;
	}
      /* After the first time around, a function-definition is not
	 allowed -- even if it was OK at first.  For example:

	   int i, f() {}

	 is not valid.  */
      function_definition_allowed_p = false;
    }

  /* Issue an error message if no declarators are present, and the
     decl-specifier-seq does not itself declare a class or
     enumeration.  */
  if (!saw_declarator)
    {
      if (cp_parser_declares_only_class_p (parser))
	shadow_tag (&decl_specifiers);
      /* Perform any deferred access checks.  */
      perform_deferred_access_checks ();
    }

  /* Consume the `;'.  */
  if (!maybe_range_for_decl)
    cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

 done:
  pop_deferring_access_checks ();
}
/* Parse a decl-specifier-seq.
decl-specifier-seq:
decl-specifier-seq [opt] decl-specifier
decl-specifier:
storage-class-specifier
type-specifier
function-specifier
friend
typedef
GNU Extension:
decl-specifier:
attributes
Set *DECL_SPECS to a representation of the decl-specifier-seq.
The parser flags FLAGS is used to control type-specifier parsing.
*DECLARES_CLASS_OR_ENUM is set to the bitwise or of the following
flags:
1: one of the decl-specifiers is an elaborated-type-specifier
(i.e., a type declaration)
2: one of the decl-specifiers is an enum-specifier or a
class-specifier (i.e., a type definition)
*/
static void
cp_parser_decl_specifier_seq (cp_parser* parser,
cp_parser_flags flags,
cp_decl_specifier_seq *decl_specs,
int* declares_class_or_enum)
{
bool constructor_possible_p = !parser->in_declarator_p;
cp_token *start_token = NULL;
/* Clear DECL_SPECS. */
clear_decl_specs (decl_specs);
/* Assume no class or enumeration type is declared. */
*declares_class_or_enum = 0;
/* Keep reading specifiers until there are no more to read. */
while (true)
{
bool constructor_p;
bool found_decl_spec;
cp_token *token;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* Save the first token of the decl spec list for error
reporting. */
if (!start_token)
start_token = token;
/* Handle attributes. */
if (token->keyword == RID_ATTRIBUTE)
{
/* Parse the attributes. */
decl_specs->attributes
= chainon (decl_specs->attributes,
cp_parser_attributes_opt (parser));
continue;
}
/* Assume we will find a decl-specifier keyword. */
found_decl_spec = true;
/* If the next token is an appropriate keyword, we can simply
add it to the list. */
switch (token->keyword)
{
/* decl-specifier:
friend
constexpr */
case RID_FRIEND:
if (!at_class_scope_p ())
{
error_at (token->location, "%<friend%> used outside of class");
cp_lexer_purge_token (parser->lexer);
}
else
{
++decl_specs->specs[(int) ds_friend];
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
}
break;
case RID_CONSTEXPR:
++decl_specs->specs[(int) ds_constexpr];
cp_lexer_consume_token (parser->lexer);
break;
/* function-specifier:
inline
virtual
explicit */
case RID_INLINE:
case RID_VIRTUAL:
case RID_EXPLICIT:
cp_parser_function_specifier_opt (parser, decl_specs);
break;
/* decl-specifier:
typedef */
case RID_TYPEDEF:
++decl_specs->specs[(int) ds_typedef];
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
/* A constructor declarator cannot appear in a typedef. */
constructor_possible_p = false;
/* The "typedef" keyword can only occur in a declaration; we
may as well commit at this point. */
cp_parser_commit_to_tentative_parse (parser);
if (decl_specs->storage_class != sc_none)
decl_specs->conflicting_specifiers_p = true;
break;
/* storage-class-specifier:
auto
register
static
extern
mutable
GNU Extension:
thread */
case RID_AUTO:
if (cxx_dialect == cxx98)
{
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
/* Complain about `auto' as a storage specifier, if
we're complaining about C++0x compatibility. */
warning_at (token->location, OPT_Wc__0x_compat, "%<auto%>"
" changes meaning in C++11; please remove it");
/* Set the storage class anyway. */
cp_parser_set_storage_class (parser, decl_specs, RID_AUTO,
token->location);
}
else
/* C++0x auto type-specifier. */
found_decl_spec = false;
break;
case RID_REGISTER:
case RID_STATIC:
case RID_EXTERN:
case RID_MUTABLE:
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
cp_parser_set_storage_class (parser, decl_specs, token->keyword,
token->location);
break;
case RID_THREAD:
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
++decl_specs->specs[(int) ds_thread];
break;
default:
/* We did not yet find a decl-specifier yet. */
found_decl_spec = false;
break;
}
if (found_decl_spec
&& (flags & CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR)
&& token->keyword != RID_CONSTEXPR)
error ("decl-specifier invalid in condition");
/* Constructors are a special case. The `S' in `S()' is not a
decl-specifier; it is the beginning of the declarator. */
constructor_p
= (!found_decl_spec
&& constructor_possible_p
&& (cp_parser_constructor_declarator_p
(parser, decl_specs->specs[(int) ds_friend] != 0)));
/* If we don't have a DECL_SPEC yet, then we must be looking at
a type-specifier. */
if (!found_decl_spec && !constructor_p)
{
int decl_spec_declares_class_or_enum;
bool is_cv_qualifier;
tree type_spec;
type_spec
= cp_parser_type_specifier (parser, flags,
decl_specs,
/*is_declaration=*/true,
&decl_spec_declares_class_or_enum,
&is_cv_qualifier);
*declares_class_or_enum |= decl_spec_declares_class_or_enum;
/* If this type-specifier referenced a user-defined type
(a typedef, class-name, etc.), then we can't allow any
more such type-specifiers henceforth.
[dcl.spec]
The longest sequence of decl-specifiers that could
possibly be a type name is taken as the
decl-specifier-seq of a declaration. The sequence shall
be self-consistent as described below.
[dcl.type]
As a general rule, at most one type-specifier is allowed
in the complete decl-specifier-seq of a declaration. The
only exceptions are the following:
-- const or volatile can be combined with any other
type-specifier.
-- signed or unsigned can be combined with char, long,
short, or int.
-- ..
Example:
typedef char* Pc;
void g (const int Pc);
Here, Pc is *not* part of the decl-specifier seq; it's
the declarator. Therefore, once we see a type-specifier
(other than a cv-qualifier), we forbid any additional
user-defined types. We *do* still allow things like `int
int' to be considered a decl-specifier-seq, and issue the
error message later. */
if (type_spec && !is_cv_qualifier)
flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES;
/* A constructor declarator cannot follow a type-specifier. */
if (type_spec)
{
constructor_possible_p = false;
found_decl_spec = true;
if (!is_cv_qualifier)
decl_specs->any_type_specifiers_p = true;
}
}
/* If we still do not have a DECL_SPEC, then there are no more
decl-specifiers. */
if (!found_decl_spec)
break;
decl_specs->any_specifiers_p = true;
/* After we see one decl-specifier, further decl-specifiers are
always optional. */
flags |= CP_PARSER_FLAGS_OPTIONAL;
}
cp_parser_check_decl_spec (decl_specs, start_token->location);
/* Don't allow a friend specifier with a class definition. */
if (decl_specs->specs[(int) ds_friend] != 0
&& (*declares_class_or_enum & 2))
error_at (start_token->location,
"class definition may not be declared a friend");
}
/* Parse an (optional) storage-class-specifier.
storage-class-specifier:
auto
register
static
extern
mutable
GNU Extension:
storage-class-specifier:
thread
Returns an IDENTIFIER_NODE corresponding to the keyword used. */
static tree
cp_parser_storage_class_specifier_opt (cp_parser* parser)
{
  /* Peek at the next token to see whether it names a storage class.  */
  enum rid keyword = cp_lexer_peek_token (parser->lexer)->keyword;

  /* In C++11 and later, `auto' is a type-specifier rather than a
     storage class, so it is not handled here.  */
  if (keyword == RID_AUTO && cxx_dialect != cxx98)
    return NULL_TREE;

  /* `auto' (C++98 only), the standard storage classes, and the GNU
     `thread' extension are all consumed the same way: eat the token
     and hand back the keyword's identifier.  */
  if (keyword == RID_AUTO
      || keyword == RID_REGISTER
      || keyword == RID_STATIC
      || keyword == RID_EXTERN
      || keyword == RID_MUTABLE
      || keyword == RID_THREAD)
    return cp_lexer_consume_token (parser->lexer)->u.value;

  /* No storage-class-specifier is present.  */
  return NULL_TREE;
}
/* Parse an (optional) function-specifier.
function-specifier:
inline
virtual
explicit
Returns an IDENTIFIER_NODE corresponding to the keyword used.
Updates DECL_SPECS, if it is non-NULL. */
static tree
cp_parser_function_specifier_opt (cp_parser* parser,
				  cp_decl_specifier_seq *decl_specs)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  enum rid keyword = token->keyword;

  if (keyword == RID_INLINE)
    {
      /* Record the specifier, if we are collecting them.  */
      if (decl_specs)
	++decl_specs->specs[(int) ds_inline];
    }
  else if (keyword == RID_VIRTUAL)
    {
      /* 14.5.2.3 [temp.mem]

	 A member function template shall not be virtual.  */
      if (PROCESSING_REAL_TEMPLATE_DECL_P ())
	error_at (token->location, "templates may not be %<virtual%>");
      else if (decl_specs)
	++decl_specs->specs[(int) ds_virtual];
    }
  else if (keyword == RID_EXPLICIT)
    {
      if (decl_specs)
	++decl_specs->specs[(int) ds_explicit];
    }
  else
    /* Not a function-specifier at all.  */
    return NULL_TREE;

  /* Consume the function-specifier token and return its value.  */
  return cp_lexer_consume_token (parser->lexer)->u.value;
}
/* Parse a linkage-specification.
linkage-specification:
extern string-literal { declaration-seq [opt] }
extern string-literal declaration */
static void
cp_parser_linkage_specification (cp_parser* parser)
{
  tree linkage;

  /* Look for the `extern' keyword. */
  cp_parser_require_keyword (parser, RID_EXTERN, RT_EXTERN);

  /* Look for the string-literal. */
  linkage = cp_parser_string_literal (parser, false, false);

  /* Transform the literal into an identifier. If the literal is a
     wide-character string, or contains embedded NULs, then we can't
     handle it as the user wants. */
  /* strlen stops at the first NUL byte, so a mismatch against the
     literal's recorded length (minus the terminating NUL) reveals a
     wide string or embedded NULs. */
  if (strlen (TREE_STRING_POINTER (linkage))
      != (size_t) (TREE_STRING_LENGTH (linkage) - 1))
    {
      cp_parser_error (parser, "invalid linkage-specification");
      /* Assume C++ linkage. */
      linkage = lang_name_cplusplus;
    }
  else
    linkage = get_identifier (TREE_STRING_POINTER (linkage));

  /* We're now using the new linkage. */
  push_lang_context (linkage);

  /* If the next token is a `{', then we're using the first
     production. */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* Consume the `{' token. */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the declarations. */
      cp_parser_declaration_seq_opt (parser);
      /* Look for the closing `}'. */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
  /* Otherwise, there's just one declaration. */
  else
    {
      bool saved_in_unbraced_linkage_specification_p;

      /* Remember that we are inside an unbraced linkage specification
	 while parsing the single declaration, then restore the old
	 value (these constructs can nest). */
      saved_in_unbraced_linkage_specification_p
	= parser->in_unbraced_linkage_specification_p;
      parser->in_unbraced_linkage_specification_p = true;
      cp_parser_declaration (parser);
      parser->in_unbraced_linkage_specification_p
	= saved_in_unbraced_linkage_specification_p;
    }

  /* We're done with the linkage-specification. */
  pop_lang_context ();
}
/* Parse a static_assert-declaration.
static_assert-declaration:
static_assert ( constant-expression , string-literal ) ;
If MEMBER_P, this static_assert is a class member. */
static void
cp_parser_static_assert(cp_parser *parser, bool member_p)
{
  tree condition;
  tree message;
  cp_token *token;
  location_t saved_loc;
  bool dummy;

  /* Peek at the `static_assert' token so we can keep track of exactly
     where the static assertion started. */
  token = cp_lexer_peek_token (parser->lexer);
  saved_loc = token->location;

  /* Look for the `static_assert' keyword. */
  if (!cp_parser_require_keyword (parser, RID_STATIC_ASSERT,
				  RT_STATIC_ASSERT))
    return;

  /* We know we are in a static assertion; commit to any tentative
     parse. */
  if (cp_parser_parsing_tentatively (parser))
    cp_parser_commit_to_tentative_parse (parser);

  /* Parse the `(' starting the static assertion condition. */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

  /* Parse the constant-expression. Allow a non-constant expression
     here in order to give better diagnostics in finish_static_assert. */
  condition =
    cp_parser_constant_expression (parser,
				   /*allow_non_constant_p=*/true,
				   /*non_constant_p=*/&dummy);

  /* Parse the separating `,'. */
  cp_parser_require (parser, CPP_COMMA, RT_COMMA);

  /* Parse the string-literal message. */
  message = cp_parser_string_literal (parser,
				      /*translate=*/false,
				      /*wide_ok=*/true);

  /* A `)' completes the static assertion. */
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    /* Error recovery: skip to the matching `)' so parsing can resume
       after the malformed assertion. */
    cp_parser_skip_to_closing_parenthesis (parser,
					   /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  /* A semicolon terminates the declaration. */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Complete the static assertion, which may mean either processing
     the static assert now or saving it for template instantiation. */
  finish_static_assert (condition, message, saved_loc, member_p);
}
/* Parse a `decltype' type. Returns the type.
simple-type-specifier:
decltype ( expression ) */
static tree
cp_parser_decltype (cp_parser *parser)
{
  tree expr;
  bool id_expression_or_member_access_p = false;
  const char *saved_message;
  bool saved_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;
  cp_token *id_expr_start_token;
  cp_token *start_token = cp_lexer_peek_token (parser->lexer);

  if (start_token->type == CPP_DECLTYPE)
    {
      /* Already parsed: a previous call cached the resulting type in
	 the token stream.  */
      cp_lexer_consume_token (parser->lexer);
      return start_token->u.value;
    }

  /* Look for the `decltype' token.  */
  if (!cp_parser_require_keyword (parser, RID_DECLTYPE, RT_DECLTYPE))
    return error_mark_node;

  /* Parse the opening `(' BEFORE saving and altering any parser
     state.  The original code required the paren after bumping
     cp_unevaluated_operand and c_inhibit_evaluation_warnings and
     after replacing type_definition_forbidden_message; the early
     return on a missing `(' then left those counters incremented and
     the message clobbered for the rest of the translation unit.  */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return error_mark_node;

  /* Types cannot be defined in a `decltype' expression.  Save away the
     old message.  */
  saved_message = parser->type_definition_forbidden_message;

  /* And create the new one.  */
  parser->type_definition_forbidden_message
    = G_("types may not be defined in %<decltype%> expressions");

  /* The restrictions on constant-expressions do not apply inside
     decltype expressions.  */
  saved_integral_constant_expression_p
    = parser->integral_constant_expression_p;
  saved_non_integral_constant_expression_p
    = parser->non_integral_constant_expression_p;
  parser->integral_constant_expression_p = false;

  /* Do not actually evaluate the expression.  */
  ++cp_unevaluated_operand;

  /* Do not warn about problems with the expression.  */
  ++c_inhibit_evaluation_warnings;

  /* First, try parsing an id-expression.  */
  id_expr_start_token = cp_lexer_peek_token (parser->lexer);
  cp_parser_parse_tentatively (parser);
  expr = cp_parser_id_expression (parser,
				  /*template_keyword_p=*/false,
				  /*check_dependency_p=*/true,
				  /*template_p=*/NULL,
				  /*declarator_p=*/false,
				  /*optional_p=*/false);

  if (!cp_parser_error_occurred (parser) && expr != error_mark_node)
    {
      bool non_integral_constant_expression_p = false;
      tree id_expression = expr;
      cp_id_kind idk;
      const char *error_msg;

      if (TREE_CODE (expr) == IDENTIFIER_NODE)
	/* Lookup the name we got back from the id-expression.  */
	expr = cp_parser_lookup_name (parser, expr,
				      none_type,
				      /*is_template=*/false,
				      /*is_namespace=*/false,
				      /*check_dependency=*/true,
				      /*ambiguous_decls=*/NULL,
				      id_expr_start_token->location);

      if (expr
	  && expr != error_mark_node
	  && TREE_CODE (expr) != TEMPLATE_ID_EXPR
	  && TREE_CODE (expr) != TYPE_DECL
	  && (TREE_CODE (expr) != BIT_NOT_EXPR
	      || !TYPE_P (TREE_OPERAND (expr, 0)))
	  && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN)
	{
	  /* Complete lookup of the id-expression.  */
	  expr = (finish_id_expression
		  (id_expression, expr, parser->scope, &idk,
		   /*integral_constant_expression_p=*/false,
		   /*allow_non_integral_constant_expression_p=*/true,
		   &non_integral_constant_expression_p,
		   /*template_p=*/false,
		   /*done=*/true,
		   /*address_p=*/false,
		   /*template_arg_p=*/false,
		   &error_msg,
		   id_expr_start_token->location));

	  if (expr == error_mark_node)
	    /* We found an id-expression, but it was something that we
	       should not have found.  This is an error, not something
	       we can recover from, so note that we found an
	       id-expression and we'll recover as gracefully as
	       possible.  */
	    id_expression_or_member_access_p = true;
	}

      if (expr
	  && expr != error_mark_node
	  && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN)
	/* We have an id-expression.  */
	id_expression_or_member_access_p = true;
    }

  if (!id_expression_or_member_access_p)
    {
      /* Abort the id-expression parse.  */
      cp_parser_abort_tentative_parse (parser);

      /* Parsing tentatively, again.  */
      cp_parser_parse_tentatively (parser);

      /* Parse a class member access.  */
      expr = cp_parser_postfix_expression (parser, /*address_p=*/false,
					   /*cast_p=*/false,
					   /*member_access_only_p=*/true, NULL);

      if (expr
	  && expr != error_mark_node
	  && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN)
	/* We have an id-expression.  */
	id_expression_or_member_access_p = true;
    }

  if (id_expression_or_member_access_p)
    /* We have parsed the complete id-expression or member access.  */
    cp_parser_parse_definitely (parser);
  else
    {
      bool saved_greater_than_is_operator_p;

      /* Abort our attempt to parse an id-expression or member access
	 expression.  */
      cp_parser_abort_tentative_parse (parser);

      /* Within a parenthesized expression, a `>' token is always
	 the greater-than operator.  */
      saved_greater_than_is_operator_p
	= parser->greater_than_is_operator_p;
      parser->greater_than_is_operator_p = true;

      /* Parse a full expression.  */
      expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);

      /* The `>' token might be the end of a template-id or
	 template-parameter-list now.  */
      parser->greater_than_is_operator_p
	= saved_greater_than_is_operator_p;
    }

  /* Go back to evaluating expressions.  */
  --cp_unevaluated_operand;
  --c_inhibit_evaluation_warnings;

  /* Restore the old message and the integral constant expression
     flags.  */
  parser->type_definition_forbidden_message = saved_message;
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;

  /* Parse to the closing `)'.  */
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      cp_parser_skip_to_closing_parenthesis (parser, true, false,
					     /*consume_paren=*/true);
      return error_mark_node;
    }

  expr = finish_decltype_type (expr, id_expression_or_member_access_p,
			       tf_warning_or_error);

  /* Replace the decltype with a CPP_DECLTYPE so we don't need to parse
     it again.  */
  start_token->type = CPP_DECLTYPE;
  start_token->u.value = expr;
  start_token->keyword = RID_MAX;
  cp_lexer_purge_tokens_after (parser->lexer, start_token);

  return expr;
}
/* Special member functions [gram.special] */
/* Parse a conversion-function-id.
conversion-function-id:
operator conversion-type-id
Returns an IDENTIFIER_NODE representing the operator. */
static tree
cp_parser_conversion_function_id (cp_parser* parser)
{
  tree type;
  tree saved_scope;
  tree saved_qualifying_scope;
  tree saved_object_scope;
  tree pushed_scope = NULL_TREE;

  /* Look for the `operator' token. */
  if (!cp_parser_require_keyword (parser, RID_OPERATOR, RT_OPERATOR))
    return error_mark_node;

  /* When we parse the conversion-type-id, the current scope will be
     reset. However, we need that information in able to look up the
     conversion function later, so we save it here. */
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;

  /* We must enter the scope of the class so that the names of
     entities declared within the class are available in the
     conversion-type-id. For example, consider:

       struct S {
	 typedef int I;
	 operator I();
       };
       S::operator I() { ... }

     In order to see that `I' is a type-name in the definition, we
     must be in the scope of `S'. */
  if (saved_scope)
    pushed_scope = push_scope (saved_scope);

  /* Parse the conversion-type-id. */
  type = cp_parser_conversion_type_id (parser);

  /* Leave the scope of the class, if any. */
  if (pushed_scope)
    pop_scope (pushed_scope);

  /* Restore the saved scope. */
  parser->scope = saved_scope;
  parser->qualifying_scope = saved_qualifying_scope;
  parser->object_scope = saved_object_scope;

  /* If the TYPE is invalid, indicate failure. */
  if (type == error_mark_node)
    return error_mark_node;

  /* Map the conversion-type-id to the identifier used internally to
     name the conversion operator for TYPE. */
  return mangle_conv_op_name_for_type (type);
}
/* Parse a conversion-type-id:
conversion-type-id:
type-specifier-seq conversion-declarator [opt]
Returns the TYPE specified. */
static tree
cp_parser_conversion_type_id (cp_parser* parser)
{
  tree attributes;
  cp_decl_specifier_seq type_specifiers;
  cp_declarator *declarator;
  tree type_specified;

  /* Parse the attributes. */
  attributes = cp_parser_attributes_opt (parser);

  /* Parse the type-specifiers. */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				/*is_trailing_return=*/false,
				&type_specifiers);

  /* If that didn't work, stop. */
  if (type_specifiers.type == error_mark_node)
    return error_mark_node;

  /* Parse the conversion-declarator. */
  declarator = cp_parser_conversion_declarator_opt (parser);

  /* Combine the declarator with the type-specifiers to compute the
     destination type of the conversion. */
  type_specified = grokdeclarator (declarator, &type_specifiers, TYPENAME,
				   /*initialized=*/0, &attributes);
  if (attributes)
    cplus_decl_attributes (&type_specified, attributes, /*flags=*/0);

  /* Don't give this error when parsing tentatively. This happens to
     work because we always parse this definitively once. */
  if (! cp_parser_uncommitted_to_tentative_parse_p (parser)
      && type_uses_auto (type_specified))
    {
      /* `operator auto' is not permitted. */
      error ("invalid use of %<auto%> in conversion operator");
      return error_mark_node;
    }

  return type_specified;
}
/* Parse an (optional) conversion-declarator.
conversion-declarator:
ptr-operator conversion-declarator [opt]
*/
static cp_declarator *
cp_parser_conversion_declarator_opt (cp_parser* parser)
{
  tree class_type;
  cp_cv_quals cv_quals;
  enum tree_code code;

  /* A ptr-operator may or may not be present, so parse it
     tentatively.  */
  cp_parser_parse_tentatively (parser);
  code = cp_parser_ptr_operator (parser, &class_type, &cv_quals);
  if (!cp_parser_parse_definitely (parser))
    /* No ptr-operator: the optional conversion-declarator is
       absent.  */
    return NULL;

  /* Recurse for any further ptr-operators, then wrap the result in an
     indirect declarator for the one just parsed.  */
  return cp_parser_make_indirect_declarator
    (code, class_type, cv_quals,
     cp_parser_conversion_declarator_opt (parser));
}
/* Parse an (optional) ctor-initializer.
ctor-initializer:
: mem-initializer-list
Returns TRUE iff the ctor-initializer was actually present. */
static bool
cp_parser_ctor_initializer_opt (cp_parser* parser)
{
  /* A ctor-initializer is introduced by a `:'.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      /* Consume the `:' token, then the mem-initializer-list.  */
      cp_lexer_consume_token (parser->lexer);
      cp_parser_mem_initializer_list (parser);
      return true;
    }

  /* No ctor-initializer; perform default initialization of any bases
     and members.  */
  if (DECL_CONSTRUCTOR_P (current_function_decl))
    finish_mem_initializers (NULL_TREE);
  return false;
}
/* Parse a mem-initializer-list.
mem-initializer-list:
mem-initializer ... [opt]
mem-initializer ... [opt] , mem-initializer-list */
static void
cp_parser_mem_initializer_list (cp_parser* parser)
{
  tree mem_initializer_list = NULL_TREE;
  /* ERROR_MARK_NODE is the "no delegating constructor seen yet"
     sentinel; it is replaced by the delegating mem-initializer once
     one is parsed. */
  tree target_ctor = error_mark_node;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Let the semantic analysis code know that we are starting the
     mem-initializer-list. */
  if (!DECL_CONSTRUCTOR_P (current_function_decl))
    error_at (token->location,
	      "only constructors take member initializers");

  /* Loop through the list. */
  while (true)
    {
      tree mem_initializer;

      token = cp_lexer_peek_token (parser->lexer);

      /* Parse the mem-initializer. */
      mem_initializer = cp_parser_mem_initializer (parser);

      /* If the next token is a `...', we're expanding member initializers. */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'. */
	  cp_lexer_consume_token (parser->lexer);

	  /* The TREE_PURPOSE must be a _TYPE, because base-specifiers
	     can be expanded but members cannot. */
	  if (mem_initializer != error_mark_node
	      && !TYPE_P (TREE_PURPOSE (mem_initializer)))
	    {
	      error_at (token->location,
			"cannot expand initializer for member %<%D%>",
			TREE_PURPOSE (mem_initializer));
	      mem_initializer = error_mark_node;
	    }

	  /* Construct the pack expansion type. */
	  if (mem_initializer != error_mark_node)
	    mem_initializer = make_pack_expansion (mem_initializer);
	}

      /* C++11 delegating constructors: once a target constructor has
	 been seen, no further mem-initializer may appear. */
      if (target_ctor != error_mark_node
	  && mem_initializer != error_mark_node)
	{
	  error ("mem-initializer for %qD follows constructor delegation",
		 TREE_PURPOSE (mem_initializer));
	  mem_initializer = error_mark_node;
	}

      /* Look for a target constructor. */
      if (mem_initializer != error_mark_node
	  && TYPE_P (TREE_PURPOSE (mem_initializer))
	  && same_type_p (TREE_PURPOSE (mem_initializer), current_class_type))
	{
	  maybe_warn_cpp0x (CPP0X_DELEGATING_CTORS);

	  /* A delegating constructor may not be combined with any
	     other mem-initializer. */
	  if (mem_initializer_list)
	    {
	      error ("constructor delegation follows mem-initializer for %qD",
		     TREE_PURPOSE (mem_initializer_list));
	      mem_initializer = error_mark_node;
	    }

	  target_ctor = mem_initializer;
	}

      /* Add it to the list, unless it was erroneous. */
      if (mem_initializer != error_mark_node)
	{
	  /* Chain onto the front, so the list ends up in reverse
	     source order. */
	  TREE_CHAIN (mem_initializer) = mem_initializer_list;
	  mem_initializer_list = mem_initializer;
	}

      /* If the next token is not a `,', we're done. */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;

      /* Consume the `,' token. */
      cp_lexer_consume_token (parser->lexer);
    }

  /* Perform semantic analysis. */
  if (DECL_CONSTRUCTOR_P (current_function_decl))
    finish_mem_initializers (mem_initializer_list);
}
/* Parse a mem-initializer.
mem-initializer:
mem-initializer-id ( expression-list [opt] )
mem-initializer-id braced-init-list
GNU extension:
mem-initializer:
( expression-list [opt] )
Returns a TREE_LIST. The TREE_PURPOSE is the TYPE (for a base
class) or FIELD_DECL (for a non-static data member) to initialize;
the TREE_VALUE is the expression-list. An empty initialization
list is represented by void_list_node. */
static tree
cp_parser_mem_initializer (cp_parser* parser)
{
  tree mem_initializer_id;
  tree expression_list;
  tree member;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Find out what is being initialized. */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      /* GNU extension: a bare parenthesized list with no
	 mem-initializer-id (see the grammar comment above). */
      permerror (token->location,
		 "anachronistic old-style base class initializer");
      mem_initializer_id = NULL_TREE;
    }
  else
    {
      mem_initializer_id = cp_parser_mem_initializer_id (parser);
      if (mem_initializer_id == error_mark_node)
	return mem_initializer_id;
    }
  member = expand_member_init (mem_initializer_id);

  /* A non-DECL result names a base class; record that we are inside a
     base initializer while its arguments are parsed. */
  if (member && !DECL_P (member))
    in_base_initializer = 1;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* C++11 braced-init-list form. */
      bool expr_non_constant_p;
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      expression_list = cp_parser_braced_list (parser, &expr_non_constant_p);
      CONSTRUCTOR_IS_DIRECT_INIT (expression_list) = 1;
      expression_list = build_tree_list (NULL_TREE, expression_list);
    }
  else
    {
      /* Parenthesized expression-list form. */
      VEC(tree,gc)* vec;
      vec = cp_parser_parenthesized_expression_list (parser, non_attr,
						     /*cast_p=*/false,
						     /*allow_expansion_p=*/true,
						     /*non_constant_p=*/NULL);
      if (vec == NULL)
	return error_mark_node;
      expression_list = build_tree_list_vec (vec);
      release_tree_vector (vec);
    }

  if (expression_list == error_mark_node)
    return error_mark_node;
  if (!expression_list)
    /* NOTE(review): the function's header comment says an empty
       initialization list is represented by void_list_node, but the
       code uses void_type_node -- confirm which representation the
       consumers of this list expect before changing either. */
    expression_list = void_type_node;

  in_base_initializer = 0;

  /* A NULL MEMBER means expand_member_init could not resolve the
     mem-initializer-id. */
  return member ? build_tree_list (member, expression_list) : error_mark_node;
}
/* Parse a mem-initializer-id.
mem-initializer-id:
:: [opt] nested-name-specifier [opt] class-name
identifier
Returns a TYPE indicating the class to be initializer for the first
production. Returns an IDENTIFIER_NODE indicating the data member
to be initialized for the second production. */
static tree
cp_parser_mem_initializer_id (cp_parser* parser)
{
  bool global_scope_p;
  bool nested_name_specifier_p;
  bool template_p = false;
  tree id;

  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* `typename' is not allowed in this context ([temp.res]). */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      error_at (token->location,
		"keyword %<typename%> not allowed in this context (a qualified "
		"member initializer is implicitly a type)");
      /* Consume the stray `typename' and continue parsing. */
      cp_lexer_consume_token (parser->lexer);
    }

  /* Look for the optional `::' operator. */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);

  /* Look for the optional nested-name-specifier. The simplest way to
     implement:

       [temp.res]

       The keyword `typename' is not permitted in a base-specifier or
       mem-initializer; in these contexts a qualified name that
       depends on a template-parameter is implicitly assumed to be a
       type name.

     is to assume that we have seen the `typename' keyword at this
     point. */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/true,
					    /*check_dependency_p=*/true,
					    /*type_p=*/true,
					    /*is_declaration=*/true)
       != NULL_TREE);
  if (nested_name_specifier_p)
    template_p = cp_parser_optional_template_keyword (parser);

  /* If there is a `::' operator or a nested-name-specifier, then we
     are definitely looking for a class-name. */
  if (global_scope_p || nested_name_specifier_p)
    return cp_parser_class_name (parser,
				 /*typename_keyword_p=*/true,
				 /*template_keyword_p=*/template_p,
				 typename_type,
				 /*check_dependency_p=*/true,
				 /*class_head_p=*/false,
				 /*is_declaration=*/true);

  /* Otherwise, we could also be looking for an ordinary identifier. */
  cp_parser_parse_tentatively (parser);
  /* Try a class-name. */
  id = cp_parser_class_name (parser,
			     /*typename_keyword_p=*/true,
			     /*template_keyword_p=*/false,
			     none_type,
			     /*check_dependency_p=*/true,
			     /*class_head_p=*/false,
			     /*is_declaration=*/true);
  /* If we found one, we're done. */
  if (cp_parser_parse_definitely (parser))
    return id;
  /* Otherwise, look for an ordinary identifier. */
  return cp_parser_identifier (parser);
}
/* Overloading [gram.over] */
/* Parse an operator-function-id.
operator-function-id:
operator operator
Returns an IDENTIFIER_NODE for the operator which is a
human-readable spelling of the identifier, e.g., `operator +'. */
static tree
cp_parser_operator_function_id (cp_parser* parser)
{
  /* An operator-function-id starts with the `operator' keyword; if it
     is missing, fail at once.  Otherwise the rest of the work is done
     by cp_parser_operator.  */
  if (cp_parser_require_keyword (parser, RID_OPERATOR, RT_OPERATOR))
    return cp_parser_operator (parser);
  return error_mark_node;
}
/* Return an identifier node for a user-defined literal operator.
The suffix identifier is chained to the operator name identifier. */
static tree
cp_literal_operator_id (const char* name)
{
  tree identifier;
  /* Room for the operator-name prefix, the suffix NAME, and slack for
     the format string's fixed characters and terminating NUL.  */
  char *buffer = XNEWVEC (char, strlen (UDLIT_OP_ANSI_PREFIX)
			      + strlen (name) + 10);
  sprintf (buffer, UDLIT_OP_ANSI_FORMAT, name);
  identifier = get_identifier (buffer);
  /* get_identifier copies BUFFER into the identifier hash table, so
     the scratch buffer must be freed here or it leaks.  */
  XDELETEVEC (buffer);
  /*IDENTIFIER_UDLIT_OPNAME_P (identifier) = 1; If we get a flag someday. */
  return identifier;
}
/* Parse an operator.
operator:
new delete new[] delete[] + - * / % ^ & | ~ ! = < >
+= -= *= /= %= ^= &= |= << >> >>= <<= == != <= >= &&
|| ++ -- , ->* -> () []
GNU Extensions:
operator:
<? >? <?= >?=
Returns an IDENTIFIER_NODE for the operator which is a
human-readable spelling of the identifier, e.g., `operator +'. */
static tree
cp_parser_operator (cp_parser* parser)
{
  tree id = NULL_TREE;
  cp_token *token;

  /* Peek at the next token. */
  token = cp_lexer_peek_token (parser->lexer);

  /* Figure out which operator we have. */
  switch (token->type)
    {
    case CPP_KEYWORD:
      {
	enum tree_code op;

	/* The keyword should be either `new' or `delete'. */
	if (token->keyword == RID_NEW)
	  op = NEW_EXPR;
	else if (token->keyword == RID_DELETE)
	  op = DELETE_EXPR;
	else
	  break;

	/* Consume the `new' or `delete' token. */
	cp_lexer_consume_token (parser->lexer);

	/* Peek at the next token. */
	token = cp_lexer_peek_token (parser->lexer);
	/* If it's a `[' token then this is the array variant of the
	   operator. */
	if (token->type == CPP_OPEN_SQUARE)
	  {
	    /* Consume the `[' token. */
	    cp_lexer_consume_token (parser->lexer);
	    /* Look for the `]' token. */
	    cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
	    id = ansi_opname (op == NEW_EXPR
			      ? VEC_NEW_EXPR : VEC_DELETE_EXPR);
	  }
	/* Otherwise, we have the non-array variant. */
	else
	  id = ansi_opname (op);

	return id;
      }

    case CPP_PLUS:
      id = ansi_opname (PLUS_EXPR);
      break;

    case CPP_MINUS:
      id = ansi_opname (MINUS_EXPR);
      break;

    case CPP_MULT:
      id = ansi_opname (MULT_EXPR);
      break;

    case CPP_DIV:
      id = ansi_opname (TRUNC_DIV_EXPR);
      break;

    case CPP_MOD:
      id = ansi_opname (TRUNC_MOD_EXPR);
      break;

    case CPP_XOR:
      id = ansi_opname (BIT_XOR_EXPR);
      break;

    case CPP_AND:
      id = ansi_opname (BIT_AND_EXPR);
      break;

    case CPP_OR:
      id = ansi_opname (BIT_IOR_EXPR);
      break;

    case CPP_COMPL:
      id = ansi_opname (BIT_NOT_EXPR);
      break;

    case CPP_NOT:
      id = ansi_opname (TRUTH_NOT_EXPR);
      break;

    case CPP_EQ:
      id = ansi_assopname (NOP_EXPR);
      break;

    case CPP_LESS:
      id = ansi_opname (LT_EXPR);
      break;

    case CPP_GREATER:
      id = ansi_opname (GT_EXPR);
      break;

    case CPP_PLUS_EQ:
      id = ansi_assopname (PLUS_EXPR);
      break;

    case CPP_MINUS_EQ:
      id = ansi_assopname (MINUS_EXPR);
      break;

    case CPP_MULT_EQ:
      id = ansi_assopname (MULT_EXPR);
      break;

    case CPP_DIV_EQ:
      id = ansi_assopname (TRUNC_DIV_EXPR);
      break;

    case CPP_MOD_EQ:
      id = ansi_assopname (TRUNC_MOD_EXPR);
      break;

    case CPP_XOR_EQ:
      id = ansi_assopname (BIT_XOR_EXPR);
      break;

    case CPP_AND_EQ:
      id = ansi_assopname (BIT_AND_EXPR);
      break;

    case CPP_OR_EQ:
      id = ansi_assopname (BIT_IOR_EXPR);
      break;

    case CPP_LSHIFT:
      id = ansi_opname (LSHIFT_EXPR);
      break;

    case CPP_RSHIFT:
      id = ansi_opname (RSHIFT_EXPR);
      break;

    case CPP_LSHIFT_EQ:
      id = ansi_assopname (LSHIFT_EXPR);
      break;

    case CPP_RSHIFT_EQ:
      id = ansi_assopname (RSHIFT_EXPR);
      break;

    case CPP_EQ_EQ:
      id = ansi_opname (EQ_EXPR);
      break;

    case CPP_NOT_EQ:
      id = ansi_opname (NE_EXPR);
      break;

    case CPP_LESS_EQ:
      id = ansi_opname (LE_EXPR);
      break;

    case CPP_GREATER_EQ:
      id = ansi_opname (GE_EXPR);
      break;

    case CPP_AND_AND:
      id = ansi_opname (TRUTH_ANDIF_EXPR);
      break;

    case CPP_OR_OR:
      id = ansi_opname (TRUTH_ORIF_EXPR);
      break;

    case CPP_PLUS_PLUS:
      id = ansi_opname (POSTINCREMENT_EXPR);
      break;

    case CPP_MINUS_MINUS:
      id = ansi_opname (PREDECREMENT_EXPR);
      break;

    case CPP_COMMA:
      id = ansi_opname (COMPOUND_EXPR);
      break;

    case CPP_DEREF_STAR:
      id = ansi_opname (MEMBER_REF);
      break;

    case CPP_DEREF:
      id = ansi_opname (COMPONENT_REF);
      break;

    case CPP_OPEN_PAREN:
      /* Consume the `('. */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the matching `)'. */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      return ansi_opname (CALL_EXPR);

    case CPP_OPEN_SQUARE:
      /* Consume the `['. */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the matching `]'. */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
      return ansi_opname (ARRAY_REF);

    case CPP_STRING:
      /* A user-defined literal operator: operator "" suffix. */
      if (cxx_dialect == cxx98)
	maybe_warn_cpp0x (CPP0X_USER_DEFINED_LITERALS);
      if (TREE_STRING_LENGTH (token->u.value) > 2)
	{
	  /* The string must be empty (length 2 counts only the
	     terminating NUL and one character). */
	  error ("expected empty string after %<operator%> keyword");
	  return error_mark_node;
	}
      /* Consume the string. */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the suffix identifier. */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_NAME)
	{
	  id = cp_parser_identifier (parser);
	  if (id != error_mark_node)
	    {
	      const char *name = IDENTIFIER_POINTER (id);
	      return cp_literal_operator_id (name);
	    }
	  /* cp_parser_identifier has already issued a diagnostic.
	     Previously control fell through into the
	     CPP_STRING_USERDEF case below and emitted a second,
	     misleading "missing space" error; return failure
	     directly instead. */
	  return error_mark_node;
	}
      else
	{
	  error ("expected suffix identifier");
	  return error_mark_node;
	}

    case CPP_STRING_USERDEF:
      error ("missing space between %<\"\"%> and suffix identifier");
      return error_mark_node;

    default:
      /* Anything else is an error. */
      break;
    }

  /* If we have selected an identifier, we need to consume the
     operator token. */
  if (id)
    cp_lexer_consume_token (parser->lexer);
  /* Otherwise, no valid operator name was present. */
  else
    {
      cp_parser_error (parser, "expected operator");
      id = error_mark_node;
    }

  return id;
}
/* Parse a template-declaration.
template-declaration:
export [opt] template < template-parameter-list > declaration
If MEMBER_P is TRUE, this template-declaration occurs within a
class-specifier.
The grammar rule given by the standard isn't correct. What
is really meant is:
template-declaration:
export [opt] template-parameter-list-seq
decl-specifier-seq [opt] init-declarator [opt] ;
export [opt] template-parameter-list-seq
function-definition
template-parameter-list-seq:
template-parameter-list-seq [opt]
template < template-parameter-list > */
static void
cp_parser_template_declaration (cp_parser* parser, bool member_p)
{
  /* An `export' keyword may precede the template; it is accepted and
     discarded, with a warning that it has no effect.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_EXPORT))
    {
      cp_lexer_consume_token (parser->lexer);
      warning (0, "keyword %<export%> not implemented, and will be ignored");
    }

  /* The remainder of the template-declaration follows the optional
     `export'.  */
  cp_parser_template_declaration_after_export (parser, member_p);
}
/* Parse a template-parameter-list.
template-parameter-list:
template-parameter
template-parameter-list , template-parameter
Returns a TREE_LIST. Each node represents a template parameter.
The nodes are connected via their TREE_CHAINs. */
static tree
cp_parser_template_parameter_list (cp_parser* parser)
{
  tree parameter_list = NULL_TREE;

  begin_template_parm_list ();

  /* The loop below parses the template parms. We first need to know
     the total number of template parms to be able to compute proper
     canonical types of each dependent type. So after the loop, when
     we know the total number of template parms,
     end_template_parm_list computes the proper canonical types and
     fixes up the dependent types accordingly. */
  while (true)
    {
      tree parameter;
      bool is_non_type;
      bool is_parameter_pack;
      location_t parm_loc;

      /* Parse the template-parameter. */
      parm_loc = cp_lexer_peek_token (parser->lexer)->location;
      parameter = cp_parser_template_parameter (parser,
						&is_non_type,
						&is_parameter_pack);
      /* Add it to the list. */
      if (parameter != error_mark_node)
	parameter_list = process_template_parm (parameter_list,
						parm_loc,
						parameter,
						is_non_type,
						is_parameter_pack);
      else
	{
	  /* Chain a placeholder for the erroneous parameter so the
	     positions of any parameters that follow it are
	     preserved. */
	  tree err_parm = build_tree_list (parameter, parameter);
	  parameter_list = chainon (parameter_list, err_parm);
	}

      /* If the next token is not a `,', we're done. */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Otherwise, consume the `,' token. */
      cp_lexer_consume_token (parser->lexer);
    }

  return end_template_parm_list (parameter_list);
}
/* Parse a template-parameter.

   template-parameter:
     type-parameter
     parameter-declaration

   If all goes well, returns a TREE_LIST.  The TREE_VALUE represents
   the parameter.  The TREE_PURPOSE is the default value, if any.
   Returns ERROR_MARK_NODE on failure.  *IS_NON_TYPE is set to true
   iff this parameter is a non-type parameter.  *IS_PARAMETER_PACK is
   set to true iff this parameter is a parameter pack. */

static tree
cp_parser_template_parameter (cp_parser* parser, bool *is_non_type,
			      bool *is_parameter_pack)
{
  cp_token *token;
  cp_parameter_declarator *parameter_declarator;
  cp_declarator *id_declarator;
  tree parm;

  /* Assume it is a type parameter or a template parameter.  */
  *is_non_type = false;
  /* Assume it not a parameter pack. */
  *is_parameter_pack = false;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it is `template', we have a template-template-parameter, which
     cp_parser_type_parameter handles.  */
  if (token->keyword == RID_TEMPLATE)
    return cp_parser_type_parameter (parser, is_parameter_pack);
  /* If it is `class' or `typename' we do not know yet whether it is a
     type parameter or a non-type parameter.  Consider:

       template <typename T, typename T::X X> ...

     or:

       template <class C, class D*> ...

     Here, the first parameter is a type parameter, and the second is
     a non-type parameter.  We can tell by looking at the token after
     the identifier -- if it is a `,', `=', or `>' then we have a type
     parameter.  */
  if (token->keyword == RID_TYPENAME || token->keyword == RID_CLASS)
    {
      /* Peek at the token after `class' or `typename'.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* If it's an ellipsis, we have a template type parameter
	 pack.  */
      if (token->type == CPP_ELLIPSIS)
	return cp_parser_type_parameter (parser, is_parameter_pack);
      /* If it's an identifier, skip it.  */
      if (token->type == CPP_NAME)
	token = cp_lexer_peek_nth_token (parser->lexer, 3);
      /* Now, see if the token looks like the end of a template
	 parameter.  */
      if (token->type == CPP_COMMA
	  || token->type == CPP_EQ
	  || token->type == CPP_GREATER)
	return cp_parser_type_parameter (parser, is_parameter_pack);
    }

  /* Otherwise, it is a non-type parameter.

     [temp.param]

     When parsing a default template-argument for a non-type
     template-parameter, the first non-nested `>' is taken as the end
     of the template parameter-list rather than a greater-than
     operator.  */
  *is_non_type = true;
  parameter_declarator
     = cp_parser_parameter_declaration (parser, /*template_parm_p=*/true,
					/*parenthesized_p=*/NULL);

  /* If the parameter declaration could not be parsed at all, bail out
     now: the code below unconditionally dereferences
     PARAMETER_DECLARATOR (in the CPP_ELLIPSIS check and in the call
     to grokdeclarator), which would crash on NULL.  */
  if (!parameter_declarator)
    return error_mark_node;

  /* If the parameter declaration is marked as a parameter pack, set
     *IS_PARAMETER_PACK to notify the caller.  Also, unmark the
     declarator's PACK_EXPANSION_P, otherwise we'll get errors from
     grokdeclarator. */
  if (parameter_declarator->declarator
      && parameter_declarator->declarator->parameter_pack_p)
    {
      *is_parameter_pack = true;
      parameter_declarator->declarator->parameter_pack_p = false;
    }

  /* If the next token is an ellipsis, and we don't already have it
     marked as a parameter pack, then we have a parameter pack (that
     has no declarator).  */
  if (!*is_parameter_pack
      && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)
      && declarator_can_be_parameter_pack (parameter_declarator->declarator))
    {
      /* Consume the `...'. */
      cp_lexer_consume_token (parser->lexer);
      maybe_warn_variadic_templates ();

      *is_parameter_pack = true;
    }
  /* We might end up with a pack expansion as the type of the non-type
     template parameter, in which case this is a non-type template
     parameter pack.  */
  else if (parameter_declarator->decl_specifiers.type
	   && PACK_EXPANSION_P (parameter_declarator->decl_specifiers.type))
    {
      *is_parameter_pack = true;
      parameter_declarator->decl_specifiers.type =
	PACK_EXPANSION_PATTERN (parameter_declarator->decl_specifiers.type);
    }

  if (*is_parameter_pack && cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      /* Parameter packs cannot have default arguments.  However, a
	 user may try to do so, so we'll parse them and give an
	 appropriate diagnostic here.  */

      cp_token *start_token = cp_lexer_peek_token (parser->lexer);

      /* Find the name of the parameter pack.  */
      id_declarator = parameter_declarator->declarator;
      while (id_declarator && id_declarator->kind != cdk_id)
	id_declarator = id_declarator->declarator;

      if (id_declarator && id_declarator->kind == cdk_id)
	error_at (start_token->location,
		  "template parameter pack %qD cannot have a default argument",
		  id_declarator->u.id.unqualified_name);
      else
	error_at (start_token->location,
		  "template parameter pack cannot have a default argument");

      /* Parse the default argument, but throw away the result.  */
      cp_parser_default_argument (parser, /*template_parm_p=*/true);
    }

  parm = grokdeclarator (parameter_declarator->declarator,
			 &parameter_declarator->decl_specifiers,
			 TPARM, /*initialized=*/0,
			 /*attrlist=*/NULL);
  if (parm == error_mark_node)
    return error_mark_node;

  return build_tree_list (parameter_declarator->default_argument, parm);
}
/* Parse a type-parameter.

   type-parameter:
     class identifier [opt]
     class identifier [opt] = type-id
     typename identifier [opt]
     typename identifier [opt] = type-id
     template < template-parameter-list > class identifier [opt]
     template < template-parameter-list > class identifier [opt]
       = id-expression

   GNU Extension (variadic templates):

   type-parameter:
     class ... identifier [opt]
     typename ... identifier [opt]

   Returns a TREE_LIST.  The TREE_VALUE is itself a TREE_LIST.  The
   TREE_PURPOSE is the default-argument, if any.  The TREE_VALUE is
   the declaration of the parameter.

   Sets *IS_PARAMETER_PACK if this is a template parameter pack. */

static tree
cp_parser_type_parameter (cp_parser* parser, bool *is_parameter_pack)
{
  cp_token *token;
  tree parameter;

  /* Look for a keyword to tell us what kind of parameter this is.
     The keyword (`class'/`typename' vs. `template') selects between
     the two switch arms below.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_CLASS_TYPENAME_TEMPLATE);
  if (!token)
    return error_mark_node;

  switch (token->keyword)
    {
    case RID_CLASS:
    case RID_TYPENAME:
      {
	tree identifier;
	tree default_argument;

	/* If the next token is an ellipsis, we have a template
	   argument pack. */
	if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	  {
	    /* Consume the `...' token. */
	    cp_lexer_consume_token (parser->lexer);
	    maybe_warn_variadic_templates ();

	    *is_parameter_pack = true;
	  }

	/* If the next token is an identifier, then it names the
	   parameter.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	  identifier = cp_parser_identifier (parser);
	else
	  identifier = NULL_TREE;

	/* Create the parameter.  */
	parameter = finish_template_type_parm (class_type_node, identifier);

	/* If the next token is an `=', we have a default argument.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	  {
	    /* Consume the `=' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the default-argument.  Access checks are not
	       deferred while parsing it (dk_no_deferred).  */
	    push_deferring_access_checks (dk_no_deferred);
	    default_argument = cp_parser_type_id (parser);

	    /* Template parameter packs cannot have default
	       arguments; diagnose the attempt and drop the parsed
	       argument. */
	    if (*is_parameter_pack)
	      {
		if (identifier)
		  error_at (token->location,
			    "template parameter pack %qD cannot have a "
			    "default argument", identifier);
		else
		  error_at (token->location,
			    "template parameter packs cannot have "
			    "default arguments");
		default_argument = NULL_TREE;
	      }
	    pop_deferring_access_checks ();
	  }
	else
	  default_argument = NULL_TREE;

	/* Create the combined representation of the parameter and the
	   default argument.  */
	parameter = build_tree_list (default_argument, parameter);
      }
      break;

    case RID_TEMPLATE:
      {
	tree identifier;
	tree default_argument;

	/* A template-template-parameter: parse the nested
	   `< template-parameter-list > class' sequence.  */
	/* Look for the `<'.  */
	cp_parser_require (parser, CPP_LESS, RT_LESS);
	/* Parse the template-parameter-list.  */
	cp_parser_template_parameter_list (parser);
	/* Look for the `>'.  */
	cp_parser_require (parser, CPP_GREATER, RT_GREATER);
	/* Look for the `class' keyword.  */
	cp_parser_require_keyword (parser, RID_CLASS, RT_CLASS);

	/* If the next token is an ellipsis, we have a template
	   argument pack. */
	if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	  {
	    /* Consume the `...' token. */
	    cp_lexer_consume_token (parser->lexer);
	    maybe_warn_variadic_templates ();

	    *is_parameter_pack = true;
	  }

	/* If the next token is an `=', then there is a
	   default-argument.  If the next token is a `>', we are at
	   the end of the parameter-list.  If the next token is a `,',
	   then we are at the end of this parameter.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)
	    && cp_lexer_next_token_is_not (parser->lexer, CPP_GREATER)
	    && cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  {
	    identifier = cp_parser_identifier (parser);

	    /* Treat invalid names as if the parameter were nameless.  */
	    if (identifier == error_mark_node)
	      identifier = NULL_TREE;
	  }
	else
	  identifier = NULL_TREE;

	/* Create the template parameter.  */
	parameter = finish_template_template_parm (class_type_node,
						   identifier);

	/* If the next token is an `=', then there is a
	   default-argument.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	  {
	    bool is_template;

	    /* Consume the `='.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the id-expression.  */
	    push_deferring_access_checks (dk_no_deferred);
	    /* save token before parsing the id-expression, for error
	       reporting */
	    token = cp_lexer_peek_token (parser->lexer);
	    default_argument
	      = cp_parser_id_expression (parser,
					 /*template_keyword_p=*/false,
					 /*check_dependency_p=*/true,
					 /*template_p=*/&is_template,
					 /*declarator_p=*/false,
					 /*optional_p=*/false);
	    if (TREE_CODE (default_argument) == TYPE_DECL)
	      /* If the id-expression was a template-id that refers to
		 a template-class, we already have the declaration here,
		 so no further lookup is needed.  */
	      ;
	    else
	      /* Look up the name.  */
	      default_argument
		= cp_parser_lookup_name (parser, default_argument,
					 none_type,
					 /*is_template=*/is_template,
					 /*is_namespace=*/false,
					 /*check_dependency=*/true,
					 /*ambiguous_decls=*/NULL,
					 token->location);
	    /* See if the default argument is valid.  */
	    default_argument
	      = check_template_template_default_arg (default_argument);

	    /* Template parameter packs cannot have default
	       arguments. */
	    if (*is_parameter_pack)
	      {
		if (identifier)
		  error_at (token->location,
			    "template parameter pack %qD cannot "
			    "have a default argument",
			    identifier);
		else
		  error_at (token->location, "template parameter packs cannot "
			    "have default arguments");
		default_argument = NULL_TREE;
	      }
	    pop_deferring_access_checks ();
	  }
	else
	  default_argument = NULL_TREE;

	/* Create the combined representation of the parameter and the
	   default argument.  */
	parameter = build_tree_list (default_argument, parameter);
      }
      break;

    default:
      /* cp_parser_require above only accepts the three keywords
	 handled by the cases, so this point is unreachable.  */
      gcc_unreachable ();
      break;
    }

  return parameter;
}
/* Parse a template-id.

   template-id:
     template-name < template-argument-list [opt] >

   If TEMPLATE_KEYWORD_P is TRUE, then we have just seen the
   `template' keyword.  In this case, a TEMPLATE_ID_EXPR will be
   returned.  Otherwise, if the template-name names a function, or set
   of functions, returns a TEMPLATE_ID_EXPR.  If the template-name
   names a class, returns a TYPE_DECL for the specialization.

   If CHECK_DEPENDENCY_P is FALSE, names are looked up in
   uninstantiated templates.  */

static tree
cp_parser_template_id (cp_parser *parser,
		       bool template_keyword_p,
		       bool check_dependency_p,
		       bool is_declaration)
{
  int i;
  tree templ;
  tree arguments;
  tree template_id;
  /* Position of the first token of the template-id; nonzero only
     when parsing tentatively, in which case the tokens are later
     replaced by a single CPP_TEMPLATE_ID token (see below).  */
  cp_token_position start_of_id = 0;
  deferred_access_check *chk;
  VEC (deferred_access_check,gc) *access_check;
  cp_token *next_token = NULL, *next_token_2 = NULL;
  bool is_identifier;

  /* If the next token corresponds to a template-id, there is no need
     to reparse it.  */
  next_token = cp_lexer_peek_token (parser->lexer);
  if (next_token->type == CPP_TEMPLATE_ID)
    {
      struct tree_check *check_value;

      /* Get the stored value.  */
      check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
      /* Perform any access checks that were deferred.  */
      access_check = check_value->checks;
      if (access_check)
	{
	  FOR_EACH_VEC_ELT (deferred_access_check, access_check, i, chk)
	    perform_or_defer_access_check (chk->binfo,
					   chk->decl,
					   chk->diag_decl);
	}
      /* Return the stored value.  */
      return check_value->value;
    }

  /* Avoid performing name lookup if there is no possibility of
     finding a template-id: the next token must be a name or
     `operator', and a plain name must be followed by what can start
     a template-argument list.  */
  if ((next_token->type != CPP_NAME && next_token->keyword != RID_OPERATOR)
      || (next_token->type == CPP_NAME
	  && !cp_parser_nth_token_starts_template_argument_list_p
	       (parser, 2)))
    {
      cp_parser_error (parser, "expected template-id");
      return error_mark_node;
    }

  /* Remember where the template-id starts.  */
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    start_of_id = cp_lexer_token_position (parser->lexer, false);

  push_deferring_access_checks (dk_deferred);

  /* Parse the template-name.  */
  is_identifier = false;
  templ = cp_parser_template_name (parser, template_keyword_p,
				   check_dependency_p,
				   is_declaration,
				   &is_identifier);
  if (templ == error_mark_node || is_identifier)
    {
      pop_deferring_access_checks ();
      return templ;
    }

  /* If we find the sequence `[:' after a template-name, it's probably
     a digraph-typo for `< ::'. Substitute the tokens and check if we can
     parse correctly the argument list.  */
  next_token = cp_lexer_peek_token (parser->lexer);
  next_token_2 = cp_lexer_peek_nth_token (parser->lexer, 2);
  if (next_token->type == CPP_OPEN_SQUARE
      && next_token->flags & DIGRAPH
      && next_token_2->type == CPP_COLON
      && !(next_token_2->flags & PREV_WHITE))
    {
      cp_parser_parse_tentatively (parser);
      /* Change `:' into `::'.  */
      next_token_2->type = CPP_SCOPE;
      /* Consume the first token (CPP_OPEN_SQUARE - which we pretend it is
	 CPP_LESS.  */
      cp_lexer_consume_token (parser->lexer);

      /* Parse the arguments.  */
      arguments = cp_parser_enclosed_template_argument_list (parser);
      if (!cp_parser_parse_definitely (parser))
	{
	  /* If we couldn't parse an argument list, then we revert our changes
	     and return simply an error. Maybe this is not a template-id
	     after all.  */
	  next_token_2->type = CPP_COLON;
	  cp_parser_error (parser, "expected %<<%>");
	  pop_deferring_access_checks ();
	  return error_mark_node;
	}
      /* Otherwise, emit an error about the invalid digraph, but continue
	 parsing because we got our argument list.  */
      if (permerror (next_token->location,
		     "%<<::%> cannot begin a template-argument list"))
	{
	  static bool hint = false;
	  inform (next_token->location,
		  "%<<:%> is an alternate spelling for %<[%>."
		  " Insert whitespace between %<<%> and %<::%>");
	  /* The extra hint is shown at most once per compilation.  */
	  if (!hint && !flag_permissive)
	    {
	      inform (next_token->location, "(if you use %<-fpermissive%>"
		      " G++ will accept your code)");
	      hint = true;
	    }
	}
    }
  else
    {
      /* Look for the `<' that starts the template-argument-list.  */
      if (!cp_parser_require (parser, CPP_LESS, RT_LESS))
	{
	  pop_deferring_access_checks ();
	  return error_mark_node;
	}
      /* Parse the arguments.  */
      arguments = cp_parser_enclosed_template_argument_list (parser);
    }

  /* Build a representation of the specialization.  */
  if (TREE_CODE (templ) == IDENTIFIER_NODE)
    template_id = build_min_nt (TEMPLATE_ID_EXPR, templ, arguments);
  else if (DECL_TYPE_TEMPLATE_P (templ)
	   || DECL_TEMPLATE_TEMPLATE_PARM_P (templ))
    {
      bool entering_scope;
      /* In "template <typename T> ... A<T>::", A<T> is the abstract A
	 template (rather than some instantiation thereof) only if
	 is not nested within some other construct.  For example, in
	 "template <typename T> void f(T) { A<T>::", A<T> is just an
	 instantiation of A.  */
      entering_scope = (template_parm_scope_p ()
			&& cp_lexer_next_token_is (parser->lexer,
						   CPP_SCOPE));
      template_id
	= finish_template_type (templ, arguments, entering_scope);
    }
  else
    {
      /* If it's not a class-template or a template-template, it should be
	 a function-template.  */
      gcc_assert ((DECL_FUNCTION_TEMPLATE_P (templ)
		   || TREE_CODE (templ) == OVERLOAD
		   || BASELINK_P (templ)));

      template_id = lookup_template_function (templ, arguments);
    }

  /* If parsing tentatively, replace the sequence of tokens that makes
     up the template-id with a CPP_TEMPLATE_ID token.  That way,
     should we re-parse the token stream, we will not have to repeat
     the effort required to do the parse, nor will we issue duplicate
     error messages about problems during instantiation of the
     template.  */
  if (start_of_id)
    {
      cp_token *token = cp_lexer_token_at (parser->lexer, start_of_id);

      /* Reset the contents of the START_OF_ID token.  */
      token->type = CPP_TEMPLATE_ID;
      /* Retrieve any deferred checks.  Do not pop this access checks yet
	 so the memory will not be reclaimed during token replacing below.  */
      token->u.tree_check_value = ggc_alloc_cleared_tree_check ();
      token->u.tree_check_value->value = template_id;
      token->u.tree_check_value->checks = get_deferred_access_checks ();
      token->keyword = RID_MAX;

      /* Purge all subsequent tokens.  */
      cp_lexer_purge_tokens_after (parser->lexer, start_of_id);

      /* ??? Can we actually assume that, if template_id ==
	 error_mark_node, we will have issued a diagnostic to the
	 user, as opposed to simply marking the tentative parse as
	 failed?  */
      if (cp_parser_error_occurred (parser) && template_id != error_mark_node)
	error_at (token->location, "parse error in template argument list");
    }

  pop_deferring_access_checks ();
  return template_id;
}
/* Parse a template-name.

   template-name:
     identifier

   The standard should actually say:

   template-name:
     identifier
     operator-function-id

   A defect report has been filed about this issue.

   A conversion-function-id cannot be a template name because they cannot
   be part of a template-id. In fact, looking at this code:

   a.operator K<int>()

   the conversion-function-id is "operator K<int>", and K<int> is a type-id.
   It is impossible to call a templated conversion-function-id with an
   explicit argument list, since the only allowed template parameter is
   the type to which it is converting.

   If TEMPLATE_KEYWORD_P is true, then we have just seen the
   `template' keyword, in a construction like:

     T::template f<3>()

   In that case `f' is taken to be a template-name, even though there
   is no way of knowing for sure.

   Returns the TEMPLATE_DECL for the template, or an OVERLOAD if the
   name refers to a set of overloaded functions, at least one of which
   is a template, or an IDENTIFIER_NODE with the name of the template,
   if TEMPLATE_KEYWORD_P is true.  If CHECK_DEPENDENCY_P is FALSE,
   names are looked up inside uninstantiated templates.  */

static tree
cp_parser_template_name (cp_parser* parser,
			 bool template_keyword_p,
			 bool check_dependency_p,
			 bool is_declaration,
			 bool *is_identifier)
{
  tree identifier;
  tree decl;
  tree fns;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is `operator', then we have either an
     operator-function-id or a conversion-function-id.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_OPERATOR))
    {
      /* We don't know whether we're looking at an
	 operator-function-id or a conversion-function-id.  */
      cp_parser_parse_tentatively (parser);
      /* Try an operator-function-id.  */
      identifier = cp_parser_operator_function_id (parser);
      /* If that didn't work, try a conversion-function-id.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  cp_parser_error (parser, "expected template-name");
	  return error_mark_node;
	}
    }
  /* Look for the identifier.  */
  else
    identifier = cp_parser_identifier (parser);

  /* If we didn't find an identifier, we don't have a template-id.  */
  if (identifier == error_mark_node)
    return error_mark_node;

  /* If the name immediately followed the `template' keyword, then it
     is a template-name.  However, if the next token is not `<', then
     we do not treat it as a template-name, since it is not being used
     as part of a template-id.  This enables us to handle constructs
     like:

       template <typename T> struct S { S(); };
       template <typename T> S<T>::S();

     correctly.  We would treat `S' as a template -- if it were `S<T>'
     -- but we do not if there is no `<'.  */
  if (processing_template_decl
      && cp_parser_nth_token_starts_template_argument_list_p (parser, 1))
    {
      /* In a declaration, in a dependent context, we pretend that the
	 "template" keyword was present in order to improve error
	 recovery.  For example, given:

	   template <typename T> void f(T::X<int>);

	 we want to treat "X<int>" as a template-id.  */
      if (is_declaration
	  && !template_keyword_p
	  && parser->scope && TYPE_P (parser->scope)
	  && check_dependency_p
	  && dependent_scope_p (parser->scope)
	  /* Do not do this for dtors (or ctors), since they never
	     need the template keyword before their name.  */
	  && !constructor_name_p (identifier, parser->scope))
	{
	  cp_token_position start = 0;

	  /* Explain what went wrong.  */
	  error_at (token->location, "non-template %qD used as template",
		    identifier);
	  inform (token->location, "use %<%T::template %D%> to indicate that it is a template",
		  parser->scope, identifier);
	  /* If parsing tentatively, find the location of the "<" token.  */
	  if (cp_parser_simulate_error (parser))
	    start = cp_lexer_token_position (parser->lexer, true);
	  /* Parse the template arguments so that we can issue error
	     messages about them.  */
	  cp_lexer_consume_token (parser->lexer);
	  cp_parser_enclosed_template_argument_list (parser);
	  /* Skip tokens until we find a good place from which to
	     continue parsing.  */
	  cp_parser_skip_to_closing_parenthesis (parser,
						 /*recovering=*/true,
						 /*or_comma=*/true,
						 /*consume_paren=*/false);
	  /* If parsing tentatively, permanently remove the
	     template argument list.  That will prevent duplicate
	     error messages from being issued about the missing
	     "template" keyword.  */
	  if (start)
	    cp_lexer_purge_tokens_after (parser->lexer, start);
	  if (is_identifier)
	    *is_identifier = true;
	  return identifier;
	}

      /* If the "template" keyword is present, then there is generally
	 no point in doing name-lookup, so we just return IDENTIFIER.
	 But, if the qualifying scope is non-dependent then we can
	 (and must) do name-lookup normally.  */
      if (template_keyword_p
	  && (!parser->scope
	      || (TYPE_P (parser->scope)
		  && dependent_type_p (parser->scope))))
	return identifier;
    }

  /* Look up the name.  */
  decl = cp_parser_lookup_name (parser, identifier,
				none_type,
				/*is_template=*/true,
				/*is_namespace=*/false,
				check_dependency_p,
				/*ambiguous_decls=*/NULL,
				token->location);

  /* If DECL is a template, then the name was a template-name.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    ;
  else
    {
      tree fn = NULL_TREE;

      /* The standard does not explicitly indicate whether a name that
	 names a set of overloaded declarations, some of which are
	 templates, is a template-name.  However, such a name should
	 be a template-name; otherwise, there is no way to form a
	 template-id for the overloaded templates.  */
      fns = BASELINK_P (decl) ? BASELINK_FUNCTIONS (decl) : decl;
      if (TREE_CODE (fns) == OVERLOAD)
	/* Scan the overload set for at least one template.  */
	for (fn = fns; fn; fn = OVL_NEXT (fn))
	  if (TREE_CODE (OVL_CURRENT (fn)) == TEMPLATE_DECL)
	    break;

      if (!fn)
	{
	  /* The name does not name a template.  */
	  cp_parser_error (parser, "expected template-name");
	  return error_mark_node;
	}
    }

  /* If DECL is dependent, and refers to a function, then just return
     its name; we will look it up again during template instantiation.  */
  if (DECL_FUNCTION_TEMPLATE_P (decl) || !DECL_P (decl))
    {
      tree scope = ovl_scope (decl);
      if (TYPE_P (scope) && dependent_type_p (scope))
	return identifier;
    }

  return decl;
}
/* Parse a template-argument-list.

   template-argument-list:
     template-argument ... [opt]
     template-argument-list , template-argument ... [opt]

   Returns a TREE_VEC containing the arguments.  */

static tree
cp_parser_template_argument_list (cp_parser* parser)
{
  /* Short lists are accumulated on the stack; if more than ten
     arguments appear the buffer spills to the heap and doubles as
     needed.  */
  tree stack_args[10];
  unsigned count = 0;
  unsigned capacity = 10;
  tree *args = stack_args;
  tree vec;
  bool saved_in_template_argument_list_p;
  bool saved_ice_p;
  bool saved_non_ice_p;

  saved_in_template_argument_list_p = parser->in_template_argument_list_p;
  parser->in_template_argument_list_p = true;
  /* Even if the template-id appears in an integral
     constant-expression, the contents of the argument list do
     not.  */
  saved_ice_p = parser->integral_constant_expression_p;
  parser->integral_constant_expression_p = false;
  saved_non_ice_p = parser->non_integral_constant_expression_p;
  parser->non_integral_constant_expression_p = false;

  /* Parse one argument per iteration; a `,' after an argument means
     another follows.  */
  for (;;)
    {
      tree argument = cp_parser_template_argument (parser);

      /* If the next token is an ellipsis, we're expanding a template
	 argument pack. */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  if (argument == error_mark_node)
	    {
	      cp_token *token = cp_lexer_peek_token (parser->lexer);
	      error_at (token->location,
			"expected parameter pack before %<...%>");
	    }
	  /* Consume the `...' token. */
	  cp_lexer_consume_token (parser->lexer);

	  /* Make the argument into a TYPE_PACK_EXPANSION or
	     EXPR_PACK_EXPANSION. */
	  argument = make_pack_expansion (argument);
	}

      /* Grow the buffer if it is full.  */
      if (count == capacity)
	{
	  capacity *= 2;
	  if (args == stack_args)
	    {
	      args = XNEWVEC (tree, capacity);
	      memcpy (args, stack_args, sizeof (tree) * count);
	    }
	  else
	    args = XRESIZEVEC (tree, args, capacity);
	}
      args[count++] = argument;

      /* Stop unless a `,' introduces another argument.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }

  /* Copy the collected arguments into a TREE_VEC.  */
  vec = make_tree_vec (count);
  while (count--)
    TREE_VEC_ELT (vec, count) = args[count];

  if (args != stack_args)
    free (args);

  parser->non_integral_constant_expression_p = saved_non_ice_p;
  parser->integral_constant_expression_p = saved_ice_p;
  parser->in_template_argument_list_p = saved_in_template_argument_list_p;

#ifdef ENABLE_CHECKING
  SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec));
#endif

  return vec;
}
/* Parse a template-argument.

   template-argument:
     assignment-expression
     type-id
     id-expression

   The representation is that of an assignment-expression, type-id, or
   id-expression -- except that the qualified id-expression is
   evaluated, so that the value returned is either a DECL or an
   OVERLOAD.

   Although the standard says "assignment-expression", it forbids
   throw-expressions or assignments in the template argument.
   Therefore, we use "conditional-expression" instead.  */

static tree
cp_parser_template_argument (cp_parser* parser)
{
  tree argument;
  bool template_p;
  bool address_p;
  bool maybe_type_id = false;
  cp_token *token = NULL, *argument_start_token = NULL;
  cp_id_kind idk;

  /* There's really no way to know what we're looking at, so we just
     try each alternative in order.

       [temp.arg]

       In a template-argument, an ambiguity between a type-id and an
       expression is resolved to a type-id, regardless of the form of
       the corresponding template-parameter.

     Therefore, we try a type-id first.  */
  cp_parser_parse_tentatively (parser);
  argument = cp_parser_template_type_arg (parser);
  /* If there was no error parsing the type-id but the next token is a
     '>>', our behavior depends on which dialect of C++ we're
     parsing. In C++98, we probably found a typo for '> >'. But there
     are type-id which are also valid expressions. For instance:

     struct X { int operator >> (int); };
     template <int V> struct Foo {};
     Foo<X () >> 5> r;

     Here 'X()' is a valid type-id of a function type, but the user just
     wanted to write the expression "X() >> 5". Thus, we remember that we
     found a valid type-id, but we still try to parse the argument as an
     expression to see what happens.

     In C++0x, the '>>' will be considered two separate '>'
     tokens.  */
  if (!cp_parser_error_occurred (parser)
      && cxx_dialect == cxx98
      && cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    {
      maybe_type_id = true;
      cp_parser_abort_tentative_parse (parser);
    }
  else
    {
      /* If the next token isn't a `,' or a `>', then this argument wasn't
	 really finished. This means that the argument is not a valid
	 type-id.  */
      if (!cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_error (parser, "expected template-argument");
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	return argument;
    }
  /* We're still not sure what the argument will be.  */
  cp_parser_parse_tentatively (parser);
  /* Try a template.  */
  argument_start_token = cp_lexer_peek_token (parser->lexer);
  argument = cp_parser_id_expression (parser,
				      /*template_keyword_p=*/false,
				      /*check_dependency_p=*/true,
				      &template_p,
				      /*declarator_p=*/false,
				      /*optional_p=*/false);
  /* If the next token isn't a `,' or a `>', then this argument wasn't
     really finished.  */
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (!cp_parser_error_occurred (parser))
    {
      /* Figure out what is being referred to.  If the id-expression
	 was for a class template specialization, then we will have a
	 TYPE_DECL at this point.  There is no need to do name lookup
	 at this point in that case.  */
      if (TREE_CODE (argument) != TYPE_DECL)
	argument = cp_parser_lookup_name (parser, argument,
					  none_type,
					  /*is_template=*/template_p,
					  /*is_namespace=*/false,
					  /*check_dependency=*/true,
					  /*ambiguous_decls=*/NULL,
					  argument_start_token->location);
      if (TREE_CODE (argument) != TEMPLATE_DECL
	  && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE)
	cp_parser_error (parser, "expected template-name");
    }
  if (cp_parser_parse_definitely (parser))
    return argument;
  /* It must be a non-type argument.  There permitted cases are given
     in [temp.arg.nontype]:

     -- an integral constant-expression of integral or enumeration
	type; or

     -- the name of a non-type template-parameter; or

     -- the name of an object or function with external linkage...

     -- the address of an object or function with external linkage...

     -- a pointer to member...  */
  /* Look for a non-type template parameter.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      cp_parser_parse_tentatively (parser);
      argument = cp_parser_primary_expression (parser,
					       /*address_p=*/false,
					       /*cast_p=*/false,
					       /*template_arg_p=*/true,
					       &idk);
      if (TREE_CODE (argument) != TEMPLATE_PARM_INDEX
	  || !cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_simulate_error (parser);
      if (cp_parser_parse_definitely (parser))
	return argument;
    }

  /* If the next token is "&", the argument must be the address of an
     object or function with external linkage.  */
  address_p = cp_lexer_next_token_is (parser->lexer, CPP_AND);
  if (address_p)
    cp_lexer_consume_token (parser->lexer);
  /* See if we might have an id-expression.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_NAME
      || token->keyword == RID_OPERATOR
      || token->type == CPP_SCOPE
      || token->type == CPP_TEMPLATE_ID
      || token->type == CPP_NESTED_NAME_SPECIFIER)
    {
      cp_parser_parse_tentatively (parser);
      argument = cp_parser_primary_expression (parser,
					       address_p,
					       /*cast_p=*/false,
					       /*template_arg_p=*/true,
					       &idk);
      if (cp_parser_error_occurred (parser)
	  || !cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_abort_tentative_parse (parser);
      else
	{
	  tree probe;

	  if (TREE_CODE (argument) == INDIRECT_REF)
	    {
	      gcc_assert (REFERENCE_REF_P (argument));
	      argument = TREE_OPERAND (argument, 0);
	    }

	  /* If we're in a template, we represent a qualified-id referring
	     to a static data member as a SCOPE_REF even if the scope isn't
	     dependent so that we can check access control later.  */
	  probe = argument;
	  if (TREE_CODE (probe) == SCOPE_REF)
	    probe = TREE_OPERAND (probe, 1);
	  if (TREE_CODE (probe) == VAR_DECL)
	    {
	      /* A variable without external linkage might still be a
		 valid constant-expression, so no error is issued here
		 if the external-linkage check fails.  */
	      if (!address_p && !DECL_EXTERNAL_LINKAGE_P (probe))
		cp_parser_simulate_error (parser);
	    }
	  else if (is_overloaded_fn (argument))
	    /* All overloaded functions are allowed; if the external
	       linkage test does not pass, an error will be issued
	       later.  */
	    ;
	  else if (address_p
		   && (TREE_CODE (argument) == OFFSET_REF
		       || TREE_CODE (argument) == SCOPE_REF))
	    /* A pointer-to-member.  */
	    ;
	  else if (TREE_CODE (argument) == TEMPLATE_PARM_INDEX)
	    ;
	  else
	    cp_parser_simulate_error (parser);

	  if (cp_parser_parse_definitely (parser))
	    {
	      if (address_p)
		argument = build_x_unary_op (ADDR_EXPR, argument,
					     tf_warning_or_error);
	      return argument;
	    }
	}
    }
  /* If the argument started with "&", there are no other valid
     alternatives at this point.  */
  if (address_p)
    {
      cp_parser_error (parser, "invalid non-type template argument");
      return error_mark_node;
    }

  /* If the argument wasn't successfully parsed as a type-id followed
     by '>>', the argument can only be a constant expression now.
     Otherwise, we try parsing the constant-expression tentatively,
     because the argument could really be a type-id.  */
  if (maybe_type_id)
    cp_parser_parse_tentatively (parser);
  argument = cp_parser_constant_expression (parser,
					    /*allow_non_constant_p=*/false,
					    /*non_constant_p=*/NULL);
  argument = fold_non_dependent_expr (argument);
  if (!maybe_type_id)
    return argument;
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (cp_parser_parse_definitely (parser))
    return argument;
  /* We did our best to parse the argument as a non type-id, but that
     was the only alternative that matched (albeit with a '>' after
     it).  We can assume it's just a typo from the user, and a
     diagnostic will then be issued.  */
  return cp_parser_template_type_arg (parser);
}
/* Parse an explicit-instantiation.
explicit-instantiation:
template declaration
Although the standard says `declaration', what it really means is:
explicit-instantiation:
template decl-specifier-seq [opt] declarator [opt] ;
Things like `template int S<int>::i = 5, int S<double>::j;' are not
supposed to be allowed. A defect report has been filed about this
issue.
GNU Extension:
explicit-instantiation:
storage-class-specifier template
decl-specifier-seq [opt] declarator [opt] ;
function-specifier template
decl-specifier-seq [opt] declarator [opt] ; */
static void
cp_parser_explicit_instantiation (cp_parser* parser)
{
  int declares_class_or_enum;
  cp_decl_specifier_seq decl_specifiers;
  tree extension_specifier = NULL_TREE;

  timevar_push (TV_TEMPLATE_INST);

  /* Look for an (optional) storage-class-specifier or
     function-specifier; these are the GNU extension forms, e.g.
     "static template ..." or "inline template ...".  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      extension_specifier
	= cp_parser_storage_class_specifier_opt (parser);
      if (!extension_specifier)
	extension_specifier
	  = cp_parser_function_specifier_opt (parser,
					      /*decl_specs=*/NULL);
    }

  /* Look for the `template' keyword.  */
  cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE);
  /* Let the front end know that we are processing an explicit
     instantiation.  */
  begin_explicit_instantiation ();
  /* [temp.explicit] says that we are supposed to ignore access
     control while processing explicit instantiation directives.  */
  push_deferring_access_checks (dk_no_check);
  /* Parse a decl-specifier-seq.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  /* If there was exactly one decl-specifier, and it declared a class,
     and there's no declarator, then we have an explicit type
     instantiation.  */
  if (declares_class_or_enum && cp_parser_declares_only_class_p (parser))
    {
      tree type;

      type = check_tag_decl (&decl_specifiers);
      /* Turn access control back on for names used during
	 template instantiation.  */
      pop_deferring_access_checks ();
      if (type)
	do_type_instantiation (type, extension_specifier,
			       /*complain=*/tf_error);
    }
  else
    {
      cp_declarator *declarator;
      tree decl;

      /* Parse the declarator.  */
      declarator
	= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
				/*ctor_dtor_or_conv_p=*/NULL,
				/*parenthesized_p=*/NULL,
				/*member_p=*/false);
      /* Bit 2 of DECLARES_CLASS_OR_ENUM set means a type was *defined*
	 (not merely declared) in the decl-specifier-seq; defining a
	 type in the return type here is diagnosed.  */
      if (declares_class_or_enum & 2)
	cp_parser_check_for_definition_in_return_type (declarator,
						       decl_specifiers.type,
						       decl_specifiers.type_location);
      if (declarator != cp_error_declarator)
	{
	  /* An explicit instantiation shall not use the `inline' or
	     `constexpr' specifiers; permerror lets -fpermissive
	     downgrade these to warnings.  */
	  if (decl_specifiers.specs[(int)ds_inline])
	    permerror (input_location, "explicit instantiation shall not use"
		       " %<inline%> specifier");
	  if (decl_specifiers.specs[(int)ds_constexpr])
	    permerror (input_location, "explicit instantiation shall not use"
		       " %<constexpr%> specifier");
	  decl = grokdeclarator (declarator, &decl_specifiers,
				 NORMAL, 0, &decl_specifiers.attributes);
	  /* Turn access control back on for names used during
	     template instantiation.  */
	  pop_deferring_access_checks ();
	  /* Do the explicit instantiation.  */
	  do_decl_instantiation (decl, extension_specifier);
	}
      else
	{
	  pop_deferring_access_checks ();
	  /* Skip the body of the explicit instantiation.  */
	  cp_parser_skip_to_end_of_statement (parser);
	}
    }
  /* We're done with the instantiation.  */
  end_explicit_instantiation ();

  cp_parser_consume_semicolon_at_end_of_statement (parser);

  timevar_pop (TV_TEMPLATE_INST);
}
/* Parse an explicit-specialization.
explicit-specialization:
template < > declaration
Although the standard says `declaration', what it really means is:
explicit-specialization:
template <> decl-specifier [opt] init-declarator [opt] ;
template <> function-definition
template <> explicit-specialization
template <> template-declaration */
static void
cp_parser_explicit_specialization (cp_parser* parser)
{
  bool need_lang_pop;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Look for the `template' keyword.  */
  cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE);
  /* Look for the `<'.  */
  cp_parser_require (parser, CPP_LESS, RT_LESS);
  /* Look for the `>'.  */
  cp_parser_require (parser, CPP_GREATER, RT_GREATER);
  /* We have processed another parameter list.  */
  ++parser->num_template_parameter_lists;
  /* [temp]

     A template ... explicit specialization ... shall not have C
     linkage.  */
  if (current_lang_name == lang_name_c)
    {
      error_at (token->location, "template specialization with C linkage");
      /* Give it C++ linkage to avoid confusing other parts of the
	 front end.  */
      push_lang_context (lang_name_cplusplus);
      need_lang_pop = true;
    }
  else
    need_lang_pop = false;
  /* Let the front end know that we are beginning a specialization.  */
  if (!begin_specialization ())
    {
      end_specialization ();
      /* NOTE(review): this early return leaves
	 parser->num_template_parameter_lists incremented and, in the
	 C-linkage error case above, the pushed C++ lang context
	 unpopped.  begin_specialization presumably only fails after an
	 error has already been issued, so this may be harmless in
	 practice -- confirm before relying on parser state here.  */
      return;
    }
  /* If the next keyword is `template', we need to figure out whether
     or not we're looking at a template-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* `template <X ...' (a non-empty parameter list) starts a nested
	 template-declaration; `template <>' is another
	 explicit-specialization, handled recursively.  */
      if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_GREATER)
	cp_parser_template_declaration_after_export (parser,
						     /*member_p=*/false);
      else
	cp_parser_explicit_specialization (parser);
    }
  else
    /* Parse the dependent declaration.  */
    cp_parser_single_declaration (parser,
				  /*checks=*/NULL,
				  /*member_p=*/false,
				  /*explicit_specialization_p=*/true,
				  /*friend_p=*/NULL);
  /* We're done with the specialization.  */
  end_specialization ();
  /* For the erroneous case of a template with C linkage, we pushed an
     implicit C++ linkage scope; exit that scope now.  */
  if (need_lang_pop)
    pop_lang_context ();
  /* We're done with this parameter list.  */
  --parser->num_template_parameter_lists;
}
/* Parse a type-specifier.
type-specifier:
simple-type-specifier
class-specifier
enum-specifier
elaborated-type-specifier
cv-qualifier
GNU Extension:
type-specifier:
__complex__
Returns a representation of the type-specifier. For a
class-specifier, enum-specifier, or elaborated-type-specifier, a
TREE_TYPE is returned; otherwise, a TYPE_DECL is returned.
The parser flags FLAGS is used to control type-specifier parsing.
If IS_DECLARATION is TRUE, then this type-specifier is appearing
in a decl-specifier-seq.
If DECLARES_CLASS_OR_ENUM is non-NULL, and the type-specifier is a
class-specifier, enum-specifier, or elaborated-type-specifier, then
*DECLARES_CLASS_OR_ENUM is set to a nonzero value. The value is 1
if a type is declared; 2 if it is defined. Otherwise, it is set to
zero.
If IS_CV_QUALIFIER is non-NULL, and the type-specifier is a
cv-qualifier, then IS_CV_QUALIFIER is set to TRUE. Otherwise, it
is set to FALSE. */
static tree
cp_parser_type_specifier (cp_parser* parser,
			  cp_parser_flags flags,
			  cp_decl_specifier_seq *decl_specs,
			  bool is_declaration,
			  int* declares_class_or_enum,
			  bool* is_cv_qualifier)
{
  tree type_spec = NULL_TREE;
  cp_token *token;
  enum rid keyword;
  /* DS stays ds_last unless the token is a "simple" specifier
     (cv-qualifier or `__complex__') handled by counting below.  */
  cp_decl_spec ds = ds_last;

  /* Assume this type-specifier does not declare a new type.  */
  if (declares_class_or_enum)
    *declares_class_or_enum = 0;
  /* And that it does not specify a cv-qualifier.  */
  if (is_cv_qualifier)
    *is_cv_qualifier = false;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If we're looking at a keyword, we can use that to guide the
     production we choose.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_ENUM:
      /* When type definitions are forbidden in this context, only an
	 elaborated-type-specifier (a declaration, not a definition)
	 is acceptable.  */
      if ((flags & CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS))
	goto elaborated_type_specifier;

      /* Look for the enum-specifier.  */
      type_spec = cp_parser_enum_specifier (parser);
      /* If that worked, we're done.  */
      if (type_spec)
	{
	  if (declares_class_or_enum)
	    *declares_class_or_enum = 2;
	  if (decl_specs)
	    cp_parser_set_decl_spec_type (decl_specs,
					  type_spec,
					  token->location,
					  /*type_definition_p=*/true);
	  return type_spec;
	}
      else
	goto elaborated_type_specifier;

      /* Any of these indicate either a class-specifier, or an
	 elaborated-type-specifier.  */
    case RID_CLASS:
    case RID_STRUCT:
    case RID_UNION:
      if ((flags & CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS))
	goto elaborated_type_specifier;

      /* Parse tentatively so that we can back up if we don't find a
	 class-specifier.  */
      cp_parser_parse_tentatively (parser);
      /* Look for the class-specifier.  */
      type_spec = cp_parser_class_specifier (parser);
      invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, type_spec);
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	{
	  if (declares_class_or_enum)
	    *declares_class_or_enum = 2;
	  if (decl_specs)
	    cp_parser_set_decl_spec_type (decl_specs,
					  type_spec,
					  token->location,
					  /*type_definition_p=*/true);
	  return type_spec;
	}

      /* Fall through.  */
    elaborated_type_specifier:
      /* We're declaring (not defining) a class or enum.  */
      if (declares_class_or_enum)
	*declares_class_or_enum = 1;

      /* Fall through.  */
    case RID_TYPENAME:
      /* Look for an elaborated-type-specifier.  */
      type_spec
	= (cp_parser_elaborated_type_specifier
	   (parser,
	    decl_specs && decl_specs->specs[(int) ds_friend],
	    is_declaration));
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs,
				      type_spec,
				      token->location,
				      /*type_definition_p=*/false);
      return type_spec;

    case RID_CONST:
      ds = ds_const;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;

    case RID_VOLATILE:
      ds = ds_volatile;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;

    case RID_RESTRICT:
      ds = ds_restrict;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;

    case RID_COMPLEX:
      /* The `__complex__' keyword is a GNU extension.  */
      ds = ds_complex;
      break;

    default:
      break;
    }

  /* Handle simple keywords: count the specifier (duplicates are
     diagnosed elsewhere from the count) and consume the token; its
     value is the returned representation.  */
  if (ds != ds_last)
    {
      if (decl_specs)
	{
	  ++decl_specs->specs[(int)ds];
	  decl_specs->any_specifiers_p = true;
	}
      return cp_lexer_consume_token (parser->lexer)->u.value;
    }

  /* If we do not already have a type-specifier, assume we are looking
     at a simple-type-specifier.  */
  type_spec = cp_parser_simple_type_specifier (parser,
					       decl_specs,
					       flags);

  /* If we didn't find a type-specifier, and a type-specifier was not
     optional in this context, issue an error message.  */
  if (!type_spec && !(flags & CP_PARSER_FLAGS_OPTIONAL))
    {
      cp_parser_error (parser, "expected type specifier");
      return error_mark_node;
    }

  return type_spec;
}
/* Parse a simple-type-specifier.
simple-type-specifier:
:: [opt] nested-name-specifier [opt] type-name
:: [opt] nested-name-specifier template template-id
char
wchar_t
bool
short
int
long
signed
unsigned
float
double
void
C++0x Extension:
simple-type-specifier:
auto
decltype ( expression )
char16_t
char32_t
__underlying_type ( type-id )
GNU Extension:
simple-type-specifier:
__int128
__typeof__ unary-expression
__typeof__ ( type-id )
Returns the indicated TYPE_DECL. If DECL_SPECS is not NULL, it is
appropriately updated. */
static tree
cp_parser_simple_type_specifier (cp_parser* parser,
				 cp_decl_specifier_seq *decl_specs,
				 cp_parser_flags flags)
{
  tree type = NULL_TREE;
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If we're looking at a keyword, things are easy.  */
  switch (token->keyword)
    {
    case RID_CHAR:
      if (decl_specs)
	decl_specs->explicit_char_p = true;
      type = char_type_node;
      break;
    case RID_CHAR16:
      type = char16_type_node;
      break;
    case RID_CHAR32:
      type = char32_type_node;
      break;
    case RID_WCHAR:
      type = wchar_type_node;
      break;
    case RID_BOOL:
      type = boolean_type_node;
      break;
    case RID_SHORT:
      if (decl_specs)
	++decl_specs->specs[(int) ds_short];
      type = short_integer_type_node;
      break;
    case RID_INT:
      if (decl_specs)
	decl_specs->explicit_int_p = true;
      type = integer_type_node;
      break;
    case RID_INT128:
      /* If the __int128 type node was never set up (the target does
	 not provide it), `__int128' is not a type keyword here.  */
      if (!int128_integer_type_node)
	break;
      if (decl_specs)
	decl_specs->explicit_int128_p = true;
      type = int128_integer_type_node;
      break;
    case RID_LONG:
      if (decl_specs)
	++decl_specs->specs[(int) ds_long];
      type = long_integer_type_node;
      break;
    case RID_SIGNED:
      if (decl_specs)
	++decl_specs->specs[(int) ds_signed];
      type = integer_type_node;
      break;
    case RID_UNSIGNED:
      if (decl_specs)
	++decl_specs->specs[(int) ds_unsigned];
      type = unsigned_type_node;
      break;
    case RID_FLOAT:
      type = float_type_node;
      break;
    case RID_DOUBLE:
      type = double_type_node;
      break;
    case RID_VOID:
      type = void_type_node;
      break;

    case RID_AUTO:
      maybe_warn_cpp0x (CPP0X_AUTO);
      type = make_auto ();
      break;

    case RID_DECLTYPE:
      /* Since DR 743, decltype can either be a simple-type-specifier by
	 itself or begin a nested-name-specifier.  Parsing it will replace
	 it with a CPP_DECLTYPE, so just rewind and let the CPP_DECLTYPE
	 handling below decide what to do.  */
      cp_parser_decltype (parser);
      cp_lexer_set_token_position (parser->lexer, token);
      break;

    case RID_TYPEOF:
      /* Consume the `typeof' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the operand to `typeof'.  */
      type = cp_parser_sizeof_operand (parser, RID_TYPEOF);
      /* If it is not already a TYPE, take its type.  */
      if (!TYPE_P (type))
	type = finish_typeof (type);

      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);

      return type;

    case RID_UNDERLYING_TYPE:
      type = cp_parser_trait_expr (parser, RID_UNDERLYING_TYPE);
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);

      return type;

    case RID_BASES:
    case RID_DIRECT_BASES:
      type = cp_parser_trait_expr (parser, token->keyword);
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);
      return type;

    default:
      break;
    }

  /* If token is an already-parsed decltype not followed by ::,
     it's a simple-type-specifier.  */
  if (token->type == CPP_DECLTYPE
      && cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE)
    {
      type = token->u.value;
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);
      cp_lexer_consume_token (parser->lexer);
      return type;
    }

  /* If the type-specifier was for a built-in type, we're done.  */
  if (type)
    {
      /* Record the type.  `signed', `unsigned', `short' and `long'
	 were already counted in DECL_SPECS above; recording a type for
	 them as well would conflict with an accompanying type
	 keyword (e.g. `unsigned int').  */
      if (decl_specs
	  && (token->keyword != RID_SIGNED
	      && token->keyword != RID_UNSIGNED
	      && token->keyword != RID_SHORT
	      && token->keyword != RID_LONG))
	cp_parser_set_decl_spec_type (decl_specs,
				      type,
				      token->location,
				      /*type_definition_p=*/false);
      if (decl_specs)
	decl_specs->any_specifiers_p = true;

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);

      /* There is no valid C++ program where a non-template type is
	 followed by a "<".  That usually indicates that the user thought
	 that the type was a template.  */
      cp_parser_check_for_invalid_template_id (parser, type, token->location);

      return TYPE_NAME (type);
    }

  /* The type-specifier must be a user-defined type.  */
  if (!(flags & CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES))
    {
      bool qualified_p;
      bool global_p;

      /* Don't gobble tokens or issue error messages if this is an
	 optional type-specifier.  */
      if (flags & CP_PARSER_FLAGS_OPTIONAL)
	cp_parser_parse_tentatively (parser);

      /* Look for the optional `::' operator.  */
      global_p
	= (cp_parser_global_scope_opt (parser,
				       /*current_scope_valid_p=*/false)
	   != NULL_TREE);
      /* Look for the nested-name specifier.  */
      qualified_p
	= (cp_parser_nested_name_specifier_opt (parser,
						/*typename_keyword_p=*/false,
						/*check_dependency_p=*/true,
						/*type_p=*/false,
						/*is_declaration=*/false)
	   != NULL_TREE);
      token = cp_lexer_peek_token (parser->lexer);
      /* If we have seen a nested-name-specifier, and the next token
	 is `template', then we are using the template-id production.  */
      if (parser->scope
	  && cp_parser_optional_template_keyword (parser))
	{
	  /* Look for the template-id.  */
	  type = cp_parser_template_id (parser,
					/*template_keyword_p=*/true,
					/*check_dependency_p=*/true,
					/*is_declaration=*/false);
	  /* If the template-id did not name a type, we are out of
	     luck.  */
	  if (TREE_CODE (type) != TYPE_DECL)
	    {
	      cp_parser_error (parser, "expected template-id for type");
	      type = NULL_TREE;
	    }
	}
      /* Otherwise, look for a type-name.  */
      else
	type = cp_parser_type_name (parser);
      /* Keep track of all name-lookups performed in class scopes.  */
      if (type
	  && !global_p
	  && !qualified_p
	  && TREE_CODE (type) == TYPE_DECL
	  && TREE_CODE (DECL_NAME (type)) == IDENTIFIER_NODE)
	maybe_note_name_used_in_class (DECL_NAME (type), type);
      /* If it didn't work out, we don't have a TYPE.  */
      if ((flags & CP_PARSER_FLAGS_OPTIONAL)
	  && !cp_parser_parse_definitely (parser))
	type = NULL_TREE;
      if (type && decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);
    }

  /* If we didn't get a type-name, issue an error message.  */
  if (!type && !(flags & CP_PARSER_FLAGS_OPTIONAL))
    {
      cp_parser_error (parser, "expected type-name");
      return error_mark_node;
    }

  if (type && type != error_mark_node)
    {
      /* See if TYPE is an Objective-C type, and if so, parse and
	 accept any protocol references following it.  Do this before
	 the cp_parser_check_for_invalid_template_id() call, because
	 Objective-C types can be followed by '<...>' which would
	 enclose protocol names rather than template arguments, and so
	 everything is fine.  */
      if (c_dialect_objc () && !parser->scope
	  && (objc_is_id (type) || objc_is_class_name (type)))
	{
	  tree protos = cp_parser_objc_protocol_refs_opt (parser);
	  tree qual_type = objc_get_protocol_qualified_type (type, protos);

	  /* Clobber the "unqualified" type previously entered into
	     DECL_SPECS with the new, improved protocol-qualified version.  */
	  if (decl_specs)
	    decl_specs->type = qual_type;

	  return qual_type;
	}

      /* There is no valid C++ program where a non-template type is
	 followed by a "<".  That usually indicates that the user
	 thought that the type was a template.  */
      cp_parser_check_for_invalid_template_id (parser, TREE_TYPE (type),
					       token->location);
    }

  return type;
}
/* Parse a type-name.
type-name:
class-name
enum-name
typedef-name
simple-template-id [in c++0x]
enum-name:
identifier
typedef-name:
identifier
Returns a TYPE_DECL for the type. */
static tree
cp_parser_type_name (cp_parser* parser)
{
  tree type_decl;

  /* A class-name is the most specific alternative; attempt it first,
     tentatively, so we can back out if it fails.  */
  cp_parser_parse_tentatively (parser);
  type_decl = cp_parser_class_name (parser,
				    /*typename_keyword_p=*/false,
				    /*template_keyword_p=*/false,
				    none_type,
				    /*check_dependency_p=*/true,
				    /*class_head_p=*/false,
				    /*is_declaration=*/false);
  if (cp_parser_parse_definitely (parser))
    /* The class-name attempt succeeded.  */
    return type_decl;

  /* Not a class-name.  Before C++11, the only remaining
     possibilities are a typedef-name or an enum-name.  */
  if (cxx_dialect < cxx0x)
    return cp_parser_nonclass_name (parser);

  /* In C++11 it might instead be a simple-template-id naming an
     instantiation of an alias template; try that tentatively.  */
  cp_parser_parse_tentatively (parser);
  type_decl = cp_parser_template_id (parser,
				     /*template_keyword_p=*/false,
				     /*check_dependency_p=*/false,
				     /*is_declaration=*/false);
  /* Only an alias template instantiation qualifies as a type-name
     here: [temp.names]/6 says a template-id that names an alias
     template specialization is a type-name, while [temp.names]/7
     makes one naming a class template specialization a class-name
     (covered by the attempt above).  */
  if (type_decl == NULL_TREE
      || TREE_CODE (type_decl) != TYPE_DECL
      || !TYPE_DECL_ALIAS_P (type_decl))
    cp_parser_simulate_error (parser);
  else
    gcc_assert (DECL_TEMPLATE_INSTANTIATION (type_decl));
  if (cp_parser_parse_definitely (parser))
    return type_decl;

  /* ... or, finally, a typedef-name or an enum-name.  */
  return cp_parser_nonclass_name (parser);
}
/* Parse a non-class type-name, that is, either an enum-name or a typedef-name.
enum-name:
identifier
typedef-name:
identifier
Returns a TYPE_DECL for the type. */
static tree
cp_parser_nonclass_name (cp_parser* parser)
{
  tree type_decl;
  tree identifier;

  cp_token *token = cp_lexer_peek_token (parser->lexer);
  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return error_mark_node;

  /* Look up the type-name.  */
  type_decl = cp_parser_lookup_name_simple (parser, identifier, token->location);

  /* The lookup may find a USING_DECL; resolve it to the underlying
     declaration where possible.  */
  if (TREE_CODE (type_decl) == USING_DECL)
    {
      if (!DECL_DEPENDENT_P (type_decl))
	type_decl = strip_using_decl (type_decl);
      else if (USING_DECL_TYPENAME_P (type_decl))
	{
	  /* We have found a type introduced by a using
	     declaration at class scope that refers to a dependent
	     type.

	     using typename :: [opt] nested-name-specifier unqualified-id ;
	  */
	  type_decl = make_typename_type (TREE_TYPE (type_decl),
					  DECL_NAME (type_decl),
					  typename_type, tf_error);
	  if (type_decl != error_mark_node)
	    type_decl = TYPE_NAME (type_decl);
	}
    }

  if (TREE_CODE (type_decl) != TYPE_DECL
      && (objc_is_id (identifier) || objc_is_class_name (identifier)))
    {
      /* See if this is an Objective-C type.  */
      tree protos = cp_parser_objc_protocol_refs_opt (parser);
      tree type = objc_get_protocol_qualified_type (identifier, protos);
      if (type)
	type_decl = TYPE_NAME (type);
    }

  /* Issue an error if we did not find a type-name.  */
  if (TREE_CODE (type_decl) != TYPE_DECL
      /* In Objective-C, we have the complication that class names are
	 normally type names and start declarations (eg, the
	 "NSObject" in "NSObject *object;"), but can be used in an
	 Objective-C 2.0 dot-syntax (as in "NSObject.version") which
	 is an expression.  So, a classname followed by a dot is not a
	 valid type-name.  */
      || (objc_is_class_name (TREE_TYPE (type_decl))
	  && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT))
    {
      /* Only issue a real diagnostic when we could not simulate an
	 error (i.e. when not parsing tentatively).  */
      if (!cp_parser_simulate_error (parser))
	cp_parser_name_lookup_error (parser, identifier, type_decl,
				     NLE_TYPE, token->location);
      return error_mark_node;
    }
  /* Remember that the name was used in the definition of the
     current class so that we can check later to see if the
     meaning would have been different after the class was
     entirely defined.  */
  else if (type_decl != error_mark_node
	   && !parser->scope)
    maybe_note_name_used_in_class (identifier, type_decl);

  return type_decl;
}
/* Parse an elaborated-type-specifier. Note that the grammar given
here incorporates the resolution to DR68.
elaborated-type-specifier:
class-key :: [opt] nested-name-specifier [opt] identifier
class-key :: [opt] nested-name-specifier [opt] template [opt] template-id
enum-key :: [opt] nested-name-specifier [opt] identifier
typename :: [opt] nested-name-specifier identifier
typename :: [opt] nested-name-specifier template [opt]
template-id
GNU extension:
elaborated-type-specifier:
class-key attributes :: [opt] nested-name-specifier [opt] identifier
class-key attributes :: [opt] nested-name-specifier [opt]
template [opt] template-id
enum attributes :: [opt] nested-name-specifier [opt] identifier
If IS_FRIEND is TRUE, then this elaborated-type-specifier is being
declared `friend'. If IS_DECLARATION is TRUE, then this
elaborated-type-specifier appears in a decl-specifiers-seq, i.e.,
something is being declared.
Returns the TYPE specified. */
static tree
cp_parser_elaborated_type_specifier (cp_parser* parser,
				     bool is_friend,
				     bool is_declaration)
{
  enum tag_types tag_type;
  tree identifier;
  tree type = NULL_TREE;
  tree attributes = NULL_TREE;
  tree globalscope;
  cp_token *token = NULL;

  /* See if we're looking at the `enum' keyword.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ENUM))
    {
      /* Consume the `enum' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Remember that it's an enumeration type.  */
      tag_type = enum_type;
      /* Issue a warning if the `struct' or `class' key (for C++0x scoped
	 enums) is used here.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CLASS)
	  || cp_lexer_next_token_is_keyword (parser->lexer, RID_STRUCT))
	{
	  pedwarn (input_location, 0, "elaborated-type-specifier "
		   "for a scoped enum must not use the %<%D%> keyword",
		   cp_lexer_peek_token (parser->lexer)->u.value);
	  /* Consume the `struct' or `class' and parse it anyway.  */
	  cp_lexer_consume_token (parser->lexer);
	}
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
    }
  /* Or, it might be `typename'.  */
  else if (cp_lexer_next_token_is_keyword (parser->lexer,
					   RID_TYPENAME))
    {
      /* Consume the `typename' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Remember that it's a `typename' type.  */
      tag_type = typename_type;
    }
  /* Otherwise it must be a class-key.  */
  else
    {
      tag_type = cp_parser_class_key (parser);
      if (tag_type == none_type)
	return error_mark_node;
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
    }

  /* Look for the `::' operator.  */
  globalscope = cp_parser_global_scope_opt (parser,
					    /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  After `typename' (without a
     leading `::') a nested-name-specifier is required by the
     grammar.  */
  if (tag_type == typename_type && !globalscope)
    {
      if (!cp_parser_nested_name_specifier (parser,
					    /*typename_keyword_p=*/true,
					    /*check_dependency_p=*/true,
					    /*type_p=*/true,
					    is_declaration))
	return error_mark_node;
    }
  else
    /* Even though `typename' is not present, the proposed resolution
       to Core Issue 180 says that in `class A<T>::B', `B' should be
       considered a type-name, even if `A<T>' is dependent.  */
    cp_parser_nested_name_specifier_opt (parser,
					 /*typename_keyword_p=*/true,
					 /*check_dependency_p=*/true,
					 /*type_p=*/true,
					 is_declaration);
  /* For everything but enumeration types, consider a template-id.
     For an enumeration type, consider only a plain identifier.  */
  if (tag_type != enum_type)
    {
      bool template_p = false;
      tree decl;

      /* Allow the `template' keyword.  */
      template_p = cp_parser_optional_template_keyword (parser);
      /* If we didn't see `template', we don't know if there's a
	 template-id or not.  */
      if (!template_p)
	cp_parser_parse_tentatively (parser);
      /* Parse the template-id.  */
      token = cp_lexer_peek_token (parser->lexer);
      decl = cp_parser_template_id (parser, template_p,
				    /*check_dependency_p=*/true,
				    is_declaration);
      /* If we didn't find a template-id, look for an ordinary
	 identifier.  */
      if (!template_p && !cp_parser_parse_definitely (parser))
	;
      /* If DECL is a TEMPLATE_ID_EXPR, and the `typename' keyword is
	 in effect, then we must assume that, upon instantiation, the
	 template will correspond to a class.  */
      else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
	       && tag_type == typename_type)
	type = make_typename_type (parser->scope, decl,
				   typename_type,
				   /*complain=*/tf_error);
      /* If the `typename' keyword is in effect and DECL is not a type
	 decl, then type is non existent.   */
      else if (tag_type == typename_type && TREE_CODE (decl) != TYPE_DECL)
	;
      else if (TREE_CODE (decl) == TYPE_DECL)
	type = check_elaborated_type_specifier (tag_type, decl,
						/*allow_template_p=*/true);
      else if (decl == error_mark_node)
	type = error_mark_node;
    }

  /* TYPE is still unset when the template-id path above did not apply
     or did not produce a type; fall back to a plain identifier.  */
  if (!type)
    {
      token = cp_lexer_peek_token (parser->lexer);
      identifier = cp_parser_identifier (parser);

      if (identifier == error_mark_node)
	{
	  parser->scope = NULL_TREE;
	  return error_mark_node;
	}

      /* For a `typename', we needn't call xref_tag.  */
      if (tag_type == typename_type
	  && TREE_CODE (parser->scope) != NAMESPACE_DECL)
	return cp_parser_make_typename_type (parser, parser->scope,
					     identifier,
					     token->location);
      /* Look up a qualified name in the usual way.  */
      if (parser->scope)
	{
	  tree decl;
	  tree ambiguous_decls;

	  decl = cp_parser_lookup_name (parser, identifier,
					tag_type,
					/*is_template=*/false,
					/*is_namespace=*/false,
					/*check_dependency=*/true,
					&ambiguous_decls,
					token->location);

	  /* If the lookup was ambiguous, an error will already have been
	     issued.  */
	  if (ambiguous_decls)
	    return error_mark_node;

	  /* If we are parsing friend declaration, DECL may be a
	     TEMPLATE_DECL tree node here.  However, we need to check
	     whether this TEMPLATE_DECL results in valid code.  Consider
	     the following example:

	       namespace N {
		 template <class T> class C {};
	       }
	       class X {
		 template <class T> friend class N::C; // #1, valid code
	       };
	       template <class T> class Y {
		 friend class N::C; // #2, invalid code
	       };

	     For both case #1 and #2, we arrive at a TEMPLATE_DECL after
	     name lookup of `N::C'.  We see that friend declaration must
	     be template for the code to be valid.  Note that
	     processing_template_decl does not work here since it is
	     always 1 for the above two cases.  */
	  decl = (cp_parser_maybe_treat_template_as_class
		  (decl, /*tag_name_p=*/is_friend
			 && parser->num_template_parameter_lists));

	  if (TREE_CODE (decl) != TYPE_DECL)
	    {
	      cp_parser_diagnose_invalid_type_name (parser,
						    parser->scope,
						    identifier,
						    token->location);
	      return error_mark_node;
	    }

	  if (TREE_CODE (TREE_TYPE (decl)) != TYPENAME_TYPE)
	    {
	      bool allow_template = (parser->num_template_parameter_lists
				     || DECL_SELF_REFERENCE_P (decl));
	      type = check_elaborated_type_specifier (tag_type, decl,
						      allow_template);

	      if (type == error_mark_node)
		return error_mark_node;
	    }

	  /* Forward declarations of nested types, such as

	       class C1::C2;
	       class C1::C2::C3;

	     are invalid unless all components preceding the final '::'
	     are complete.  If all enclosing types are complete, these
	     declarations become merely pointless.

	     Invalid forward declarations of nested types are errors
	     caught elsewhere in parsing.  Those that are pointless arrive
	     here.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	      && !is_friend && !processing_explicit_instantiation)
	    warning (0, "declaration %qD does not declare anything", decl);

	  type = TREE_TYPE (decl);
	}
      else
	{
	  /* An elaborated-type-specifier sometimes introduces a new type and
	     sometimes names an existing type.  Normally, the rule is that it
	     introduces a new type only if there is not an existing type of
	     the same name already in scope.  For example, given:

	       struct S {};
	       void f() { struct S s; }

	     the `struct S' in the body of `f' is the same `struct S' as in
	     the global scope; the existing definition is used.  However, if
	     there were no global declaration, this would introduce a new
	     local class named `S'.

	     An exception to this rule applies to the following code:

	       namespace N { struct S; }

	     Here, the elaborated-type-specifier names a new type
	     unconditionally; even if there is already an `S' in the
	     containing scope this declaration names a new type.

	     This exception only applies if the elaborated-type-specifier
	     forms the complete declaration:

	       [class.name]

	       A declaration consisting solely of `class-key identifier ;' is
	       either a redeclaration of the name in the current scope or a
	       forward declaration of the identifier as a class name.  It
	       introduces the name into the current scope.

	     We are in this situation precisely when the next token is a `;'.

	     An exception to the exception is that a `friend' declaration does
	     *not* name a new type; i.e., given:

	       struct S { friend struct T; };

	     `T' is not a new type in the scope of `S'.

	     Also, `new struct S' or `sizeof (struct S)' never results in the
	     definition of a new type; a new type can only be declared in a
	     declaration context.  */

	  tag_scope ts;
	  bool template_p;

	  if (is_friend)
	    /* Friends have special name lookup rules.  */
	    ts = ts_within_enclosing_non_class;
	  else if (is_declaration
		   && cp_lexer_next_token_is (parser->lexer,
					      CPP_SEMICOLON))
	    /* This is a `class-key identifier ;' */
	    ts = ts_current;
	  else
	    ts = ts_global;

	  template_p =
	    (parser->num_template_parameter_lists
	     && (cp_parser_next_token_starts_class_definition_p (parser)
		 || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)));
	  /* An unqualified name was used to reference this type, so
	     there were no qualifying templates.  */
	  if (!cp_parser_check_template_parameters (parser,
						    /*num_templates=*/0,
						    token->location,
						    /*declarator=*/NULL))
	    return error_mark_node;
	  type = xref_tag (tag_type, identifier, ts, template_p);
	}
    }

  if (type == error_mark_node)
    return error_mark_node;

  /* Allow attributes on forward declarations of classes.  */
  if (attributes)
    {
      if (TREE_CODE (type) == TYPENAME_TYPE)
	warning (OPT_Wattributes,
		 "attributes ignored on uninstantiated type");
      else if (tag_type != enum_type && CLASSTYPE_TEMPLATE_INSTANTIATION (type)
	       && ! processing_explicit_instantiation)
	warning (OPT_Wattributes,
		 "attributes ignored on template instantiation");
      else if (is_declaration && cp_parser_declares_only_class_p (parser))
	cplus_decl_attributes (&type, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
      else
	warning (OPT_Wattributes,
		 "attributes ignored on elaborated-type-specifier that is not a forward declaration");
    }

  if (tag_type != enum_type)
    {
      /* Indicate whether this class was declared as a `class' or as a
	 `struct'.  */
      if (TREE_CODE (type) == RECORD_TYPE)
	CLASSTYPE_DECLARED_CLASS (type) = (tag_type == class_type);
      cp_parser_check_class_key (tag_type, type);
    }

  /* A "<" cannot follow an elaborated type specifier.  If that
     happens, the user was probably trying to form a template-id.  */
  cp_parser_check_for_invalid_template_id (parser, type, token->location);

  return type;
}
/* Parse an enum-specifier.
enum-specifier:
enum-head { enumerator-list [opt] }
enum-head { enumerator-list , } [C++0x]
enum-head:
enum-key identifier [opt] enum-base [opt]
enum-key nested-name-specifier identifier enum-base [opt]
enum-key:
enum
enum class [C++0x]
enum struct [C++0x]
enum-base: [C++0x]
: type-specifier-seq
opaque-enum-specifier:
enum-key identifier enum-base [opt] ;
GNU Extensions:
enum-key attributes[opt] identifier [opt] enum-base [opt]
{ enumerator-list [opt] }attributes[opt]
enum-key attributes[opt] identifier [opt] enum-base [opt]
{ enumerator-list, }attributes[opt] [C++0x]
Returns an ENUM_TYPE representing the enumeration, or NULL_TREE
if the token stream isn't an enum-specifier after all. */
static tree
cp_parser_enum_specifier (cp_parser* parser)
{
  tree identifier;
  tree type = NULL_TREE;
  tree prev_scope;
  tree nested_name_specifier = NULL_TREE;
  tree attributes;
  bool scoped_enum_p = false;
  bool has_underlying_type = false;
  bool nested_being_defined = false;
  bool new_value_list = false;
  bool is_new_type = false;
  bool is_anonymous = false;
  tree underlying_type = NULL_TREE;
  cp_token *type_start_token = NULL;
  /* Save this flag so it can be restored on every exit path (see the
     "out" label at the bottom).  */
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

  /* A ':' after the enum-head introduces an enum-base, not a scope, so
     the typo-correction machinery must not rewrite it.  */
  parser->colon_corrects_to_scope_p = false;

  /* Parse tentatively so that we can back up if we don't find a
     enum-specifier.  */
  cp_parser_parse_tentatively (parser);

  /* Caller guarantees that the current token is 'enum', an identifier
     possibly follows, and the token after that is an opening brace.
     If we don't have an identifier, fabricate an anonymous name for
     the enumeration being defined.  */
  cp_lexer_consume_token (parser->lexer);

  /* Parse the "class" or "struct", which indicates a scoped
     enumeration type in C++0x.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CLASS)
      || cp_lexer_next_token_is_keyword (parser->lexer, RID_STRUCT))
    {
      if (cxx_dialect < cxx0x)
        maybe_warn_cpp0x (CPP0X_SCOPED_ENUMS);

      /* Consume the `struct' or `class' token.  */
      cp_lexer_consume_token (parser->lexer);

      scoped_enum_p = true;
    }

  /* GNU extension: attributes may appear after the enum-key.  */
  attributes = cp_parser_attributes_opt (parser);

  /* Clear the qualification.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;

  /* Figure out in what scope the declaration is being placed.  */
  prev_scope = current_scope ();

  type_start_token = cp_lexer_peek_token (parser->lexer);

  /* Access checks are deferred here because we do not yet know whether
     this really is an enum-specifier; they would fire spuriously during
     the tentative parse.  */
  push_deferring_access_checks (dk_no_check);
  nested_name_specifier
    = cp_parser_nested_name_specifier_opt (parser,
                                           /*typename_keyword_p=*/true,
                                           /*check_dependency_p=*/false,
                                           /*type_p=*/false,
                                           /*is_declaration=*/false);

  if (nested_name_specifier)
    {
      tree name;

      /* A qualified name must refer to a previously declared enum.  */
      identifier = cp_parser_identifier (parser);
      name = cp_parser_lookup_name (parser, identifier,
                                    enum_type,
                                    /*is_template=*/false,
                                    /*is_namespace=*/false,
                                    /*check_dependency=*/true,
                                    /*ambiguous_decls=*/NULL,
                                    input_location);
      if (name)
        {
          type = TREE_TYPE (name);
          if (TREE_CODE (type) == TYPENAME_TYPE)
            {
              /* Are template enums allowed in ISO? */
              if (template_parm_scope_p ())
                pedwarn (type_start_token->location, OPT_pedantic,
                         "%qD is an enumeration template", name);
              /* ignore a typename reference, for it will be solved by name
                 in start_enum.  */
              type = NULL_TREE;
            }
        }
      else
        error_at (type_start_token->location,
                  "%qD is not an enumerator-name", identifier);
    }
  else
    {
      /* Unqualified: either a plain identifier or an anonymous enum,
         for which we fabricate a name.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
        identifier = cp_parser_identifier (parser);
      else
        {
          identifier = make_anon_name ();
          is_anonymous = true;
        }
    }
  pop_deferring_access_checks ();

  /* Check for the `:' that denotes a specified underlying type in C++0x.
     Note that a `:' could also indicate a bitfield width, however.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      cp_decl_specifier_seq type_specifiers;

      /* Consume the `:'.  */
      cp_lexer_consume_token (parser->lexer);

      /* Parse the type-specifier-seq.  */
      cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
                                    /*is_trailing_return=*/false,
                                    &type_specifiers);

      /* At this point this is surely not elaborated type specifier.  */
      if (!cp_parser_parse_definitely (parser))
        return NULL_TREE;

      if (cxx_dialect < cxx0x)
        maybe_warn_cpp0x (CPP0X_SCOPED_ENUMS);

      has_underlying_type = true;

      /* If that didn't work, stop.  */
      if (type_specifiers.type != error_mark_node)
        {
          underlying_type = grokdeclarator (NULL, &type_specifiers, TYPENAME,
                                            /*initialized=*/0, NULL);
          if (underlying_type == error_mark_node)
            underlying_type = NULL_TREE;
        }
    }

  /* Look for the `{' but don't consume it yet.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      if (cxx_dialect < cxx0x || (!scoped_enum_p && !underlying_type))
        {
          cp_parser_error (parser, "expected %<{%>");
          if (has_underlying_type)
            {
              type = NULL_TREE;
              goto out;
            }
        }
      /* An opaque-enum-specifier must have a ';' here.  */
      if ((scoped_enum_p || underlying_type)
          && cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
        {
          cp_parser_error (parser, "expected %<;%> or %<{%>");
          if (has_underlying_type)
            {
              type = NULL_TREE;
              goto out;
            }
        }
    }

  /* If an enum-base was seen, cp_parser_parse_definitely was already
     called above; otherwise commit (or back out of) the tentative parse
     now.  */
  if (!has_underlying_type && !cp_parser_parse_definitely (parser))
    return NULL_TREE;

  if (nested_name_specifier)
    {
      if (CLASS_TYPE_P (nested_name_specifier))
        {
          /* Temporarily mark the enclosing class as being defined so the
             enum may be declared inside it; the flag is restored below.  */
          nested_being_defined = TYPE_BEING_DEFINED (nested_name_specifier);
          TYPE_BEING_DEFINED (nested_name_specifier) = 1;
          push_scope (nested_name_specifier);
        }
      else if (TREE_CODE (nested_name_specifier) == NAMESPACE_DECL)
        {
          push_nested_namespace (nested_name_specifier);
        }
    }

  /* Issue an error message if type-definitions are forbidden here.  */
  if (!cp_parser_check_type_definition (parser))
    type = error_mark_node;
  else
    /* Create the new type.  We do this before consuming the opening
       brace so the enum will be recorded as being on the line of its
       tag (or the 'enum' keyword, if there is no tag).  */
    type = start_enum (identifier, type, underlying_type,
                       scoped_enum_p, &is_new_type);

  /* If the next token is not '{' it is an opaque-enum-specifier or an
     elaborated-type-specifier.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      timevar_push (TV_PARSE_ENUM);
      if (nested_name_specifier)
        {
          /* The following catches invalid code such as:
             enum class S<int>::E { A, B, C }; */
          if (!processing_specialization
              && CLASS_TYPE_P (nested_name_specifier)
              && CLASSTYPE_USE_TEMPLATE (nested_name_specifier))
            error_at (type_start_token->location, "cannot add an enumerator "
                      "list to a template instantiation");

          /* If that scope does not contain the scope in which the
             class was originally declared, the program is invalid.  */
          if (prev_scope && !is_ancestor (prev_scope, nested_name_specifier))
            {
              if (at_namespace_scope_p ())
                error_at (type_start_token->location,
                          "declaration of %qD in namespace %qD which does not "
                          "enclose %qD",
                          type, prev_scope, nested_name_specifier);
              else
                error_at (type_start_token->location,
                          "declaration of %qD in %qD which does not enclose %qD",
                          type, prev_scope, nested_name_specifier);
              type = error_mark_node;
            }
        }

      if (scoped_enum_p)
        begin_scope (sk_scoped_enum, type);

      /* Consume the opening brace.  */
      cp_lexer_consume_token (parser->lexer);

      if (type == error_mark_node)
        ; /* Nothing to add */
      else if (OPAQUE_ENUM_P (type)
               || (cxx_dialect > cxx98 && processing_specialization))
        {
          /* A previously-opaque enum is now getting its enumerator
             list; clear the opaque flag and re-point the declaration
             location at this definition.  */
          new_value_list = true;
          SET_OPAQUE_ENUM_P (type, false);
          DECL_SOURCE_LOCATION (TYPE_NAME (type)) = type_start_token->location;
        }
      else
        {
          error_at (type_start_token->location, "multiple definition of %q#T", type);
          error_at (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)),
                    "previous definition here");
          type = error_mark_node;
        }

      if (type == error_mark_node)
        cp_parser_skip_to_end_of_block_or_statement (parser);
      /* If the next token is not '}', then there are some enumerators.  */
      else if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE))
        cp_parser_enumerator_list (parser, type);

      /* Consume the final '}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

      if (scoped_enum_p)
        finish_scope ();
      timevar_pop (TV_PARSE_ENUM);
    }
  else
    {
      /* If a ';' follows, then it is an opaque-enum-specifier
         and additional restrictions apply.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
        {
          if (is_anonymous)
            error_at (type_start_token->location,
                      "opaque-enum-specifier without name");
          else if (nested_name_specifier)
            error_at (type_start_token->location,
                      "opaque-enum-specifier must use a simple identifier");
        }
    }

  /* Look for trailing attributes to apply to this enumeration, and
     apply them if appropriate.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      tree trailing_attr = cp_parser_attributes_opt (parser);
      trailing_attr = chainon (trailing_attr, attributes);
      cplus_decl_attributes (&type,
                             trailing_attr,
                             (int) ATTR_FLAG_TYPE_IN_PLACE);
    }

  /* Finish up the enumeration.  */
  if (type != error_mark_node)
    {
      if (new_value_list)
        finish_enum_value_list (type);
      if (is_new_type)
        finish_enum (type);
    }

  if (nested_name_specifier)
    {
      if (CLASS_TYPE_P (nested_name_specifier))
        {
          /* Undo the temporary TYPE_BEING_DEFINED change made above.  */
          TYPE_BEING_DEFINED (nested_name_specifier) = nested_being_defined;
          pop_scope (nested_name_specifier);
        }
      else if (TREE_CODE (nested_name_specifier) == NAMESPACE_DECL)
        {
          pop_nested_namespace (nested_name_specifier);
        }
    }
 out:
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
  return type;
}
/* Parse an enumerator-list. The enumerators all have the indicated
TYPE.
enumerator-list:
enumerator-definition
enumerator-list , enumerator-definition */
static void
cp_parser_enumerator_list (cp_parser* parser, tree type)
{
  for (;;)
    {
      /* Each iteration handles one enumerator-definition.  */
      cp_parser_enumerator_definition (parser, type);

      /* No `,' means the list is complete.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
        return;

      /* Eat the separator.  */
      cp_lexer_consume_token (parser->lexer);

      /* A `}' immediately after the comma means it was a trailing
         comma, permitted in C++0x but only a pedwarn before that
         (outside of system headers).  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
        {
          if (cxx_dialect < cxx0x && !in_system_header)
            pedwarn (input_location, OPT_pedantic,
                     "comma at end of enumerator list");
          return;
        }
    }
}
/* Parse an enumerator-definition. The enumerator has the indicated
TYPE.
enumerator-definition:
enumerator
enumerator = constant-expression
enumerator:
identifier */
static void
cp_parser_enumerator_definition (cp_parser* parser, tree type)
{
  tree name;
  tree value = NULL_TREE;
  /* Record the location of the name up front; if an explicit value
     follows, its location is not the one we want to attach to the
     enumerator.  */
  location_t id_loc = cp_lexer_peek_token (parser->lexer)->location;

  /* The enumerator name.  */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    return;

  /* An `=' introduces an explicit constant value.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      /* Eat the `=' and parse the constant-expression.  */
      cp_lexer_consume_token (parser->lexer);
      value = cp_parser_constant_expression (parser,
                                             /*allow_non_constant_p=*/false,
                                             NULL);
    }

  /* In a template, the initializer must not contain any bare template
     parameter pack.  */
  if (check_for_bare_parameter_packs (value))
    value = error_mark_node;

  /* integral_constant_value will pull out this expression, so make sure
     it's folded as appropriate.  */
  value = fold_non_dependent_expr (value);

  /* Create the enumerator.  */
  build_enumerator (name, value, type, id_loc);
}
/* Parse a namespace-name.
namespace-name:
original-namespace-name
namespace-alias
Returns the NAMESPACE_DECL for the namespace. */
static tree
cp_parser_namespace_name (cp_parser* parser)
{
  cp_token *tok = cp_lexer_peek_token (parser->lexer);
  tree name;
  tree ns;

  /* The name of the namespace comes first.  */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    return error_mark_node;

  /* Look up the identifier in the currently active scope, considering
     only namespaces, due to:

       [basic.lookup.udir]

       When looking up a namespace-name in a using-directive or alias
       definition, only namespace names are considered.

     And:

       [basic.lookup.qual]

       During the lookup of a name preceding the :: scope resolution
       operator, object, function, and enumerator names are ignored.

     (Note that cp_parser_qualifying_entity only calls this
     function if the token after the name is the scope resolution
     operator.)  */
  ns = cp_parser_lookup_name (parser, name,
                              none_type,
                              /*is_template=*/false,
                              /*is_namespace=*/true,
                              /*check_dependency=*/true,
                              /*ambiguous_decls=*/NULL,
                              tok->location);

  /* Anything other than a NAMESPACE_DECL is an error.  */
  if (ns == error_mark_node || TREE_CODE (ns) != NAMESPACE_DECL)
    {
      /* Only emit the hard error when we are committed; a tentative
         parse just records the failure.  */
      if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
        error_at (tok->location, "%qD is not a namespace-name", name);
      cp_parser_error (parser, "expected namespace-name");
      return error_mark_node;
    }

  return ns;
}
/* Parse a namespace-definition.
namespace-definition:
named-namespace-definition
unnamed-namespace-definition
named-namespace-definition:
original-namespace-definition
extension-namespace-definition
original-namespace-definition:
namespace identifier { namespace-body }
extension-namespace-definition:
namespace original-namespace-name { namespace-body }
unnamed-namespace-definition:
namespace { namespace-body } */
static void
cp_parser_namespace_definition (cp_parser* parser)
{
  tree name, attribs;
  bool has_visibility;
  bool is_inline = cp_lexer_next_token_is_keyword (parser->lexer, RID_INLINE);

  if (is_inline)
    {
      /* C++0x inline namespaces; warn for older dialects, then eat the
         `inline' keyword.  */
      maybe_warn_cpp0x (CPP0X_INLINE_NAMESPACES);
      cp_lexer_consume_token (parser->lexer);
    }

  /* The `namespace' keyword is required.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);

  /* The identifier is optional; NULL_TREE means an unnamed namespace.
     We do not attempt to distinguish between an
     original-namespace-definition and an extension-namespace-definition
     at this point; the semantic analysis routines are responsible for
     that.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    name = cp_parser_identifier (parser);
  else
    name = NULL_TREE;

  /* Any attributes come next.  */
  attribs = cp_parser_attributes_opt (parser);

  /* The `{' opens the namespace.  */
  cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE);

  /* Enter the namespace.  */
  push_namespace (name);

  /* "inline namespace" is equivalent to a stub namespace definition
     followed by a strong using directive.  */
  if (is_inline)
    {
      tree name_space = current_namespace;
      /* Set up namespace association.  */
      DECL_NAMESPACE_ASSOCIATIONS (name_space)
        = tree_cons (CP_DECL_CONTEXT (name_space), NULL_TREE,
                     DECL_NAMESPACE_ASSOCIATIONS (name_space));
      /* Import the contents of the inline namespace.  */
      pop_namespace ();
      do_using_directive (name_space);
      push_namespace (name);
    }

  has_visibility = handle_namespace_attrs (current_namespace, attribs);

  /* The declarations inside the braces.  */
  cp_parser_namespace_body (parser);

  if (has_visibility)
    pop_visibility (1);

  /* Leave the namespace.  */
  pop_namespace ();

  /* The closing `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
}
/* Parse a namespace-body.
namespace-body:
declaration-seq [opt] */
static void
cp_parser_namespace_body (cp_parser* parser)
{
  /* A namespace-body is nothing more than an optional sequence of
     declarations; delegate entirely.  */
  cp_parser_declaration_seq_opt (parser);
}
/* Parse a namespace-alias-definition.
namespace-alias-definition:
namespace identifier = qualified-namespace-specifier ; */
static void
cp_parser_namespace_alias_definition (cp_parser* parser)
{
  cp_token *tok = cp_lexer_peek_token (parser->lexer);
  tree name;
  tree target;

  /* The `namespace' keyword comes first.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);

  /* Then the alias being declared.  */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    return;

  /* Catch `namespace N {' here: a namespace definition is not valid in
     this context.  Diagnose it (when committed) and skip the body.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser)
      && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      error_at (tok->location, "%<namespace%> definition is not allowed here");
      /* Skip the definition.  */
      cp_lexer_consume_token (parser->lexer);
      if (cp_parser_skip_to_closing_brace (parser))
        cp_lexer_consume_token (parser->lexer);
      return;
    }

  /* `= qualified-namespace-specifier ;' completes the alias.  */
  cp_parser_require (parser, CPP_EQ, RT_EQ);
  target = cp_parser_qualified_namespace_specifier (parser);
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Register the alias in the symbol table.  */
  do_namespace_alias (name, target);
}
/* Parse a qualified-namespace-specifier.
qualified-namespace-specifier:
:: [opt] nested-name-specifier [opt] namespace-name
Returns a NAMESPACE_DECL corresponding to the specified
namespace. */
static tree
cp_parser_qualified_namespace_specifier (cp_parser* parser)
{
  /* An optional leading `::' ...  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);

  /* ... then an optional nested-name-specifier ...  */
  cp_parser_nested_name_specifier_opt (parser,
                                       /*typename_keyword_p=*/false,
                                       /*check_dependency_p=*/true,
                                       /*type_p=*/false,
                                       /*is_declaration=*/true);

  /* ... and finally the namespace-name itself.  */
  return cp_parser_namespace_name (parser);
}
/* Parse a using-declaration, or, if ACCESS_DECLARATION_P is true, an
access declaration.
using-declaration:
using typename [opt] :: [opt] nested-name-specifier unqualified-id ;
using :: unqualified-id ;
access-declaration:
qualified-id ;
*/
static bool
cp_parser_using_declaration (cp_parser* parser,
                             bool access_declaration_p)
{
  cp_token *token;
  bool typename_p = false;
  bool global_scope_p;
  tree decl;
  tree identifier;
  tree qscope;
  /* Remember the error count so we can tell at the end whether parsing
     this declaration produced any new errors.  */
  int oldcount = errorcount;
  cp_token *diag_token = NULL;

  if (access_declaration_p)
    {
      /* An access-declaration has no `using' keyword, so we can only
         recognize it tentatively and commit below once we have seen the
         trailing `;'.  */
      diag_token = cp_lexer_peek_token (parser->lexer);
      cp_parser_parse_tentatively (parser);
    }
  else
    {
      /* Look for the `using' keyword.  */
      cp_parser_require_keyword (parser, RID_USING, RT_USING);
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* See if it's `typename'.  */
      if (token->keyword == RID_TYPENAME)
        {
          /* Remember that we've seen it.  */
          typename_p = true;
          /* Consume the `typename' token.  */
          cp_lexer_consume_token (parser->lexer);
        }
    }

  /* Look for the optional global scope qualification.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
                                   /*current_scope_valid_p=*/false)
       != NULL_TREE);

  /* If we saw `typename', or didn't see `::', then there must be a
     nested-name-specifier present.  */
  if (typename_p || !global_scope_p)
    qscope = cp_parser_nested_name_specifier (parser, typename_p,
                                              /*check_dependency_p=*/true,
                                              /*type_p=*/false,
                                              /*is_declaration=*/true);
  /* Otherwise, we could be in either of the two productions.  In that
     case, treat the nested-name-specifier as optional.  */
  else
    qscope = cp_parser_nested_name_specifier_opt (parser,
                                                  /*typename_keyword_p=*/false,
                                                  /*check_dependency_p=*/true,
                                                  /*type_p=*/false,
                                                  /*is_declaration=*/true);
  if (!qscope)
    qscope = global_namespace;

  if (access_declaration_p && cp_parser_error_occurred (parser))
    /* Something has already gone wrong; there's no need to parse
       further.  Since an error has occurred, the return value of
       cp_parser_parse_definitely will be false, as required.  */
    return cp_parser_parse_definitely (parser);

  token = cp_lexer_peek_token (parser->lexer);
  /* Parse the unqualified-id.  */
  identifier = cp_parser_unqualified_id (parser,
                                         /*template_keyword_p=*/false,
                                         /*check_dependency_p=*/true,
                                         /*declarator_p=*/true,
                                         /*optional_p=*/false);

  if (access_declaration_p)
    {
      /* An access-declaration must be followed immediately by `;';
         otherwise force the tentative parse to fail.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
        cp_parser_simulate_error (parser);
      if (!cp_parser_parse_definitely (parser))
        return false;
    }

  /* The function we call to handle a using-declaration is different
     depending on what scope we are in.  */
  if (qscope == error_mark_node || identifier == error_mark_node)
    ;
  else if (TREE_CODE (identifier) != IDENTIFIER_NODE
           && TREE_CODE (identifier) != BIT_NOT_EXPR)
    /* [namespace.udecl]

       A using declaration shall not name a template-id.  */
    error_at (token->location,
              "a template-id may not appear in a using-declaration");
  else
    {
      if (at_class_scope_p ())
        {
          /* Create the USING_DECL.  */
          decl = do_class_using_decl (parser->scope, identifier);

          if (decl && typename_p)
            USING_DECL_TYPENAME_P (decl) = 1;

          if (check_for_bare_parameter_packs (decl))
            return false;
          else
            /* Add it to the list of members in this class.  */
            finish_member_declaration (decl);
        }
      else
        {
          decl = cp_parser_lookup_name_simple (parser,
                                               identifier,
                                               token->location);
          if (decl == error_mark_node)
            cp_parser_name_lookup_error (parser, identifier,
                                         decl, NLE_NULL,
                                         token->location);
          else if (check_for_bare_parameter_packs (decl))
            return false;
          else if (!at_namespace_scope_p ())
            do_local_using_decl (decl, qscope, identifier);
          else
            do_toplevel_using_decl (decl, qscope, identifier);
        }
    }

  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Only warn about the deprecated access-declaration form when no
     errors were emitted above, so the warning never piles on top of a
     diagnostic for the same construct.  */
  if (access_declaration_p && errorcount == oldcount)
    warning_at (diag_token->location, OPT_Wdeprecated,
                "access declarations are deprecated "
                "in favour of using-declarations; "
                "suggestion: add the %<using%> keyword");

  return true;
}
/* Parse an alias-declaration.
alias-declaration:
using identifier attribute-specifier-seq [opt] = type-id */
static tree
cp_parser_alias_declaration (cp_parser* parser)
{
  tree id, type, decl, pushed_scope = NULL_TREE, attributes;
  location_t id_location;
  cp_declarator *declarator;
  cp_decl_specifier_seq decl_specs;
  bool member_p;
  const char *saved_message = NULL;

  /* Look for the `using' keyword.  */
  cp_parser_require_keyword (parser, RID_USING, RT_USING);
  id_location = cp_lexer_peek_token (parser->lexer)->location;
  id = cp_parser_identifier (parser);
  if (id == error_mark_node)
    return error_mark_node;

  attributes = cp_parser_attributes_opt (parser);
  if (attributes == error_mark_node)
    return error_mark_node;

  cp_parser_require (parser, CPP_EQ, RT_EQ);

  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  /* Now we are going to parse the type-id of the declaration.  */

  /*
    [dcl.type]/3 says:

        "A type-specifier-seq shall not define a class or enumeration
         unless it appears in the type-id of an alias-declaration (7.1.3) that
         is not the declaration of a template-declaration."

    In other words, if we currently are in an alias template, the
    type-id should not define a type.

    So let's set parser->type_definition_forbidden_message in that
    case; cp_parser_check_type_definition (called by
    cp_parser_class_specifier) will then emit an error if a type is
    defined in the type-id.  */
  if (parser->num_template_parameter_lists)
    {
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message =
        G_("types may not be defined in alias template declarations");
    }

  type = cp_parser_type_id (parser);

  /* Restore the error message if need be.  */
  if (parser->num_template_parameter_lists)
    parser->type_definition_forbidden_message = saved_message;

  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  /* A typedef-name can also be introduced by an alias-declaration. The
     identifier following the using keyword becomes a typedef-name. It has
     the same semantics as if it were introduced by the typedef
     specifier. In particular, it does not define a new type and it shall
     not appear in the type-id.  */

  /* Build a declaration equivalent to `typedef <type> <id>;' plus the
     ds_alias marker so later passes know it came from an
     alias-declaration.  */
  clear_decl_specs (&decl_specs);
  decl_specs.type = type;
  decl_specs.attributes = attributes;
  ++decl_specs.specs[(int) ds_typedef];
  ++decl_specs.specs[(int) ds_alias];

  declarator = make_id_declarator (NULL_TREE, id, sfk_none);
  declarator->id_loc = id_location;

  /* A class-scope alias is a member and goes through grokfield; any
     other goes through the ordinary declaration path.  */
  member_p = at_class_scope_p ();
  if (member_p)
    decl = grokfield (declarator, &decl_specs, NULL_TREE, false,
                      NULL_TREE, attributes);
  else
    decl = start_decl (declarator, &decl_specs, 0,
                       attributes, NULL_TREE, &pushed_scope);
  if (decl == error_mark_node)
    return decl;

  cp_finish_decl (decl, NULL_TREE, 0, NULL_TREE, 0);

  if (pushed_scope)
    pop_scope (pushed_scope);

  /* If decl is a template, return its TEMPLATE_DECL so that it gets
     added into the symbol table; otherwise, return the TYPE_DECL.  */
  if (DECL_LANG_SPECIFIC (decl)
      && DECL_TEMPLATE_INFO (decl)
      && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl)))
    {
      decl = DECL_TI_TEMPLATE (decl);
      if (member_p)
        check_member_template (decl);
    }

  return decl;
}
/* Parse a using-directive.
using-directive:
using namespace :: [opt] nested-name-specifier [opt]
namespace-name ; */
static void
cp_parser_using_directive (cp_parser* parser)
{
  tree used_ns;
  tree attribs;

  /* `using namespace' starts the directive.  */
  cp_parser_require_keyword (parser, RID_USING, RT_USING);
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);

  /* An optional `::' and nested-name-specifier may qualify the name.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);
  cp_parser_nested_name_specifier_opt (parser,
                                       /*typename_keyword_p=*/false,
                                       /*check_dependency_p=*/true,
                                       /*type_p=*/false,
                                       /*is_declaration=*/true);

  /* The namespace being used.  */
  used_ns = cp_parser_namespace_name (parser);

  /* GNU attributes may follow.  */
  attribs = cp_parser_attributes_opt (parser);

  /* Record the directive in the symbol table.  */
  parse_using_directive (used_ns, attribs);

  /* The directive ends with a `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
}
/* Parse an asm-definition.
asm-definition:
asm ( string-literal ) ;
GNU Extension:
asm-definition:
asm volatile [opt] ( string-literal ) ;
asm volatile [opt] ( string-literal : asm-operand-list [opt] ) ;
asm volatile [opt] ( string-literal : asm-operand-list [opt]
: asm-operand-list [opt] ) ;
asm volatile [opt] ( string-literal : asm-operand-list [opt]
: asm-operand-list [opt]
: asm-clobber-list [opt] ) ;
asm volatile [opt] goto ( string-literal : : asm-operand-list [opt]
: asm-clobber-list [opt]
: asm-goto-list ) ; */
static void
cp_parser_asm_definition (cp_parser* parser)
{
  tree string;
  tree outputs = NULL_TREE;
  tree inputs = NULL_TREE;
  tree clobbers = NULL_TREE;
  tree labels = NULL_TREE;
  tree asm_stmt;
  bool volatile_p = false;
  bool extended_p = false;
  bool invalid_inputs_p = false;
  bool invalid_outputs_p = false;
  bool goto_p = false;
  /* Set to the token we will demand in place of `)' when `asm goto'
     was seen but no label list was parsed.  */
  required_token missing = RT_NONE;

  /* Look for the `asm' keyword.  */
  cp_parser_require_keyword (parser, RID_ASM, RT_ASM);
  /* See if the next token is `volatile'.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_VOLATILE))
    {
      /* Remember that we saw the `volatile' keyword.  */
      volatile_p = true;
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* `asm goto' is only meaningful inside a function body.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && parser->in_function_body
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_GOTO))
    {
      /* Remember that we saw the `goto' keyword.  */
      goto_p = true;
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the opening `('.  */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return;
  /* Look for the string.  */
  string = cp_parser_string_literal (parser, false, false);
  if (string == error_mark_node)
    {
      /* Recover by skipping past the matching `)'.  */
      cp_parser_skip_to_closing_parenthesis (parser, true, false,
                                             /*consume_paren=*/true);
      return;
    }

  /* If we're allowing GNU extensions, check for the extended assembly
     syntax.  Unfortunately, the `:' tokens need not be separated by
     a space in C, and so, for compatibility, we tolerate that here
     too.  Doing that means that we have to treat the `::' operator as
     two `:' tokens.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && parser->in_function_body
      && (cp_lexer_next_token_is (parser->lexer, CPP_COLON)
          || cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)))
    {
      bool inputs_p = false;
      bool clobbers_p = false;
      bool labels_p = false;

      /* The extended syntax was used.  */
      extended_p = true;

      /* Look for outputs.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
        {
          /* Consume the `:'.  */
          cp_lexer_consume_token (parser->lexer);
          /* Parse the output-operands.  */
          if (cp_lexer_next_token_is_not (parser->lexer,
                                          CPP_COLON)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_SCOPE)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_CLOSE_PAREN)
              && !goto_p)
            outputs = cp_parser_asm_operand_list (parser);

            if (outputs == error_mark_node)
              invalid_outputs_p = true;
        }
      /* If the next token is `::', there are no outputs, and the
         next token is the beginning of the inputs.  */
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
        /* The inputs are coming next.  */
        inputs_p = true;

      /* Look for inputs.  */
      if (inputs_p
          || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
        {
          /* Consume the `:' or `::'.  */
          cp_lexer_consume_token (parser->lexer);
          /* Parse the output-operands.  */
          if (cp_lexer_next_token_is_not (parser->lexer,
                                          CPP_COLON)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_SCOPE)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_CLOSE_PAREN))
            inputs = cp_parser_asm_operand_list (parser);

            if (inputs == error_mark_node)
              invalid_inputs_p = true;
        }
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
        /* The clobbers are coming next.  */
        clobbers_p = true;

      /* Look for clobbers.  */
      if (clobbers_p
          || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
        {
          clobbers_p = true;
          /* Consume the `:' or `::'.  */
          cp_lexer_consume_token (parser->lexer);
          /* Parse the clobbers.  */
          if (cp_lexer_next_token_is_not (parser->lexer,
                                          CPP_COLON)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_CLOSE_PAREN))
            clobbers = cp_parser_asm_clobber_list (parser);
        }
      else if (goto_p
               && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
        /* The labels are coming next.  */
        labels_p = true;

      /* Look for labels.  */
      if (labels_p
          || (goto_p && cp_lexer_next_token_is (parser->lexer, CPP_COLON)))
        {
          labels_p = true;
          /* Consume the `:' or `::'.  */
          cp_lexer_consume_token (parser->lexer);
          /* Parse the labels.  */
          labels = cp_parser_asm_label_list (parser);
        }

      /* `asm goto' requires a label list; record which separator token
         is missing so the diagnostic below names the right one.  */
      if (goto_p && !labels_p)
        missing = clobbers_p ? RT_COLON : RT_COLON_SCOPE;
    }
  else if (goto_p)
    missing = RT_COLON_SCOPE;

  /* Look for the closing `)'.  */
  if (!cp_parser_require (parser, missing ? CPP_COLON : CPP_CLOSE_PAREN,
                          missing ? missing : RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, true, false,
                                           /*consume_paren=*/true);
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  if (!invalid_inputs_p && !invalid_outputs_p)
    {
      /* Create the ASM_EXPR.  */
      if (parser->in_function_body)
        {
          asm_stmt = finish_asm_stmt (volatile_p, string, outputs,
                                      inputs, clobbers, labels);
          /* If the extended syntax was not used, mark the ASM_EXPR.  */
          if (!extended_p)
            {
              tree temp = asm_stmt;
              if (TREE_CODE (temp) == CLEANUP_POINT_EXPR)
                temp = TREE_OPERAND (temp, 0);

              ASM_INPUT_P (temp) = 1;
            }
        }
      else
        /* At namespace scope there is no statement to build; just hand
           the string to the back end.  */
        cgraph_add_asm_node (string);
    }
}
/* Declarators [gram.dcl.decl] */
/* Parse an init-declarator.
init-declarator:
declarator initializer [opt]
GNU Extension:
init-declarator:
declarator asm-specification [opt] attributes [opt] initializer [opt]
function-definition:
decl-specifier-seq [opt] declarator ctor-initializer [opt]
function-body
decl-specifier-seq [opt] declarator function-try-block
GNU Extension:
function-definition:
__extension__ function-definition
TM Extension:
function-definition:
decl-specifier-seq [opt] declarator function-transaction-block
The DECL_SPECIFIERS apply to this declarator. Returns a
representation of the entity declared. If MEMBER_P is TRUE, then
this declarator appears in a class scope. The new DECL created by
this declarator is returned.
The CHECKS are access checks that should be performed once we know
what entity is being declared (and, therefore, what classes have
befriended it).
If FUNCTION_DEFINITION_ALLOWED_P then we handle the declarator and
for a function-definition here as well. If the declarator is a
declarator for a function-definition, *FUNCTION_DEFINITION_P will
be TRUE upon return. By that point, the function-definition will
have been completely parsed.
FUNCTION_DEFINITION_P may be NULL if FUNCTION_DEFINITION_ALLOWED_P
is FALSE.
If MAYBE_RANGE_FOR_DECL is not NULL, the pointed tree will be set to the
parsed declaration if it is an uninitialized single declarator not followed
by a `;', or to error_mark_node otherwise. Either way, the trailing `;',
if present, will not be consumed. If returned, this declarator will be
created with SD_INITIALIZED but will not call cp_finish_decl. */
static tree
cp_parser_init_declarator (cp_parser* parser,
			   cp_decl_specifier_seq *decl_specifiers,
			   VEC (deferred_access_check,gc)* checks,
			   bool function_definition_allowed_p,
			   bool member_p,
			   int declares_class_or_enum,
			   bool* function_definition_p,
			   tree* maybe_range_for_decl)
{
  cp_token *token = NULL, *asm_spec_start_token = NULL,
	   *attributes_start_token = NULL;
  cp_declarator *declarator;
  tree prefix_attributes;
  tree attributes;
  tree asm_specification;
  tree initializer;
  tree decl = NULL_TREE;
  tree scope;
  /* One of the SD_* codes assigned below: SD_UNINITIALIZED,
     SD_INITIALIZED, SD_DEFAULTED or SD_DELETED.  */
  int is_initialized;
  /* Only valid if IS_INITIALIZED is true.  In that case, CPP_EQ if
     initialized with "= ..", CPP_OPEN_PAREN if initialized with
     "(...)".  */
  enum cpp_ttype initialization_kind;
  bool is_direct_init = false;
  bool is_non_constant_init;
  int ctor_dtor_or_conv_p;
  bool friend_p;
  tree pushed_scope = NULL_TREE;
  /* Set below when MAYBE_RANGE_FOR_DECL is non-NULL and this looks
     like the uninitialized declaration of a range-for variable.  */
  bool range_for_decl_p = false;
  /* Gather the attributes that were provided with the
     decl-specifiers.  */
  prefix_attributes = decl_specifiers->attributes;
  /* Assume that this is not the declarator for a function
     definition.  */
  if (function_definition_p)
    *function_definition_p = false;
  /* Defer access checks while parsing the declarator; we cannot know
     what names are accessible until we know what is being
     declared.  */
  resume_deferring_access_checks ();
  /* Parse the declarator.  */
  token = cp_lexer_peek_token (parser->lexer);
  declarator
    = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
			    &ctor_dtor_or_conv_p,
			    /*parenthesized_p=*/NULL,
			    member_p);
  /* Gather up the deferred checks.  */
  stop_deferring_access_checks ();
  /* If the DECLARATOR was erroneous, there's no need to go
     further.  */
  if (declarator == cp_error_declarator)
    return error_mark_node;
  /* Check that the number of template-parameter-lists is OK.  */
  if (!cp_parser_check_declarator_template_parameters (parser, declarator,
						       token->location))
    return error_mark_node;
  /* The 2 bit indicates that a type was actually defined (not merely
     declared) in the decl-specifiers; such a definition must not
     appear in a function's return type.  */
  if (declares_class_or_enum & 2)
    cp_parser_check_for_definition_in_return_type (declarator,
						   decl_specifiers->type,
						   decl_specifiers->type_location);
  /* Figure out what scope the entity declared by the DECLARATOR is
     located in.  `grokdeclarator' sometimes changes the scope, so
     we compute it now.  */
  scope = get_scope_of_declarator (declarator);
  /* Perform any lookups in the declared type which were thought to be
     dependent, but are not in the scope of the declarator.  */
  decl_specifiers->type
    = maybe_update_decl_type (decl_specifiers->type, scope);
  /* If we're allowing GNU extensions, look for an asm-specification
     and attributes.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      /* Look for an asm-specification.  */
      asm_spec_start_token = cp_lexer_peek_token (parser->lexer);
      asm_specification = cp_parser_asm_specification_opt (parser);
      /* And attributes.  */
      attributes_start_token = cp_lexer_peek_token (parser->lexer);
      attributes = cp_parser_attributes_opt (parser);
    }
  else
    {
      asm_specification = NULL_TREE;
      attributes = NULL_TREE;
    }
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Check to see if the token indicates the start of a
     function-definition.  */
  if (function_declarator_p (declarator)
      && cp_parser_token_starts_function_definition_p (token))
    {
      if (!function_definition_allowed_p)
	{
	  /* If a function-definition should not appear here, issue an
	     error message.  */
	  cp_parser_error (parser,
			   "a function-definition is not allowed here");
	  return error_mark_node;
	}
      else
	{
	  location_t func_brace_location
	    = cp_lexer_peek_token (parser->lexer)->location;
	  /* Neither attributes nor an asm-specification are allowed
	     on a function-definition.  */
	  if (asm_specification)
	    error_at (asm_spec_start_token->location,
		      "an asm-specification is not allowed "
		      "on a function-definition");
	  if (attributes)
	    error_at (attributes_start_token->location,
		      "attributes are not allowed on a function-definition");
	  /* This is a function-definition.  */
	  *function_definition_p = true;
	  /* Parse the function definition.  */
	  if (member_p)
	    decl = cp_parser_save_member_function_body (parser,
							decl_specifiers,
							declarator,
							prefix_attributes);
	  else
	    decl
	      = (cp_parser_function_definition_from_specifiers_and_declarator
		 (parser, decl_specifiers, prefix_attributes, declarator));
	  if (decl != error_mark_node && DECL_STRUCT_FUNCTION (decl))
	    {
	      /* This is where the prologue starts...  */
	      DECL_STRUCT_FUNCTION (decl)->function_start_locus
		= func_brace_location;
	    }
	  return decl;
	}
    }
  /* [dcl.dcl]
     Only in function declarations for constructors, destructors, and
     type conversions can the decl-specifier-seq be omitted.
     We explicitly postpone this check past the point where we handle
     function-definitions because we tolerate function-definitions
     that are missing their return types in some modes.  */
  if (!decl_specifiers->any_specifiers_p && ctor_dtor_or_conv_p <= 0)
    {
      cp_parser_error (parser,
		       "expected constructor, destructor, or type conversion");
      return error_mark_node;
    }
  /* An `=' or an `(', or an '{' in C++0x, indicates an initializer.  */
  if (token->type == CPP_EQ
      || token->type == CPP_OPEN_PAREN
      || token->type == CPP_OPEN_BRACE)
    {
      is_initialized = SD_INITIALIZED;
      initialization_kind = token->type;
      if (maybe_range_for_decl)
	*maybe_range_for_decl = error_mark_node;
      /* `= default' and `= delete' are not real initializers; look
	 one token past the `=' to distinguish them.  */
      if (token->type == CPP_EQ
	  && function_declarator_p (declarator))
	{
	  cp_token *t2 = cp_lexer_peek_nth_token (parser->lexer, 2);
	  if (t2->keyword == RID_DEFAULT)
	    is_initialized = SD_DEFAULTED;
	  else if (t2->keyword == RID_DELETE)
	    is_initialized = SD_DELETED;
	}
    }
  else
    {
      /* If the init-declarator isn't initialized and isn't followed by a
	 `,' or `;', it's not a valid init-declarator.  */
      if (token->type != CPP_COMMA
	  && token->type != CPP_SEMICOLON)
	{
	  if (maybe_range_for_decl && *maybe_range_for_decl != error_mark_node)
	    range_for_decl_p = true;
	  else
	    {
	      cp_parser_error (parser, "expected initializer");
	      return error_mark_node;
	    }
	}
      is_initialized = SD_UNINITIALIZED;
      initialization_kind = CPP_EOF;
    }
  /* Because start_decl has side-effects, we should only call it if we
     know we're going ahead.  By this point, we know that we cannot
     possibly be looking at any other construct.  */
  cp_parser_commit_to_tentative_parse (parser);
  /* If the decl specifiers were bad, issue an error now that we're
     sure this was intended to be a declarator.  Then continue
     declaring the variable(s), as int, to try to cut down on further
     errors.  */
  if (decl_specifiers->any_specifiers_p
      && decl_specifiers->type == error_mark_node)
    {
      cp_parser_error (parser, "invalid type in declaration");
      decl_specifiers->type = integer_type_node;
    }
  /* Check to see whether or not this declaration is a friend.  */
  friend_p = cp_parser_friend_p (decl_specifiers);
  /* Enter the newly declared entry in the symbol table.  If we're
     processing a declaration in a class-specifier, we wait until
     after processing the initializer.  */
  if (!member_p)
    {
      if (parser->in_unbraced_linkage_specification_p)
	decl_specifiers->storage_class = sc_extern;
      decl = start_decl (declarator, decl_specifiers,
			 range_for_decl_p? SD_INITIALIZED : is_initialized,
			 attributes, prefix_attributes,
			 &pushed_scope);
      /* Adjust location of decl if declarator->id_loc is more appropriate:
	 set, and decl wasn't merged with another decl, in which case its
	 location would be different from input_location, and more accurate.  */
      if (DECL_P (decl)
	  && declarator->id_loc != UNKNOWN_LOCATION
	  && DECL_SOURCE_LOCATION (decl) == input_location)
	DECL_SOURCE_LOCATION (decl) = declarator->id_loc;
    }
  else if (scope)
    /* Enter the SCOPE.  That way unqualified names appearing in the
       initializer will be looked up in SCOPE.  */
    pushed_scope = push_scope (scope);
  /* Perform deferred access control checks, now that we know in which
     SCOPE the declared entity resides.  */
  if (!member_p && decl)
    {
      tree saved_current_function_decl = NULL_TREE;
      /* If the entity being declared is a function, pretend that we
	 are in its scope.  If it is a `friend', it may have access to
	 things that would not otherwise be accessible.  */
      if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  saved_current_function_decl = current_function_decl;
	  current_function_decl = decl;
	}
      /* Perform access checks for template parameters.  */
      cp_parser_perform_template_parameter_access_checks (checks);
      /* Perform the access control checks for the declarator and the
	 decl-specifiers.  */
      perform_deferred_access_checks ();
      /* Restore the saved value.  */
      if (TREE_CODE (decl) == FUNCTION_DECL)
	current_function_decl = saved_current_function_decl;
    }
  /* Parse the initializer.  */
  initializer = NULL_TREE;
  is_direct_init = false;
  is_non_constant_init = true;
  if (is_initialized)
    {
      if (function_declarator_p (declarator))
	{
	  cp_token *initializer_start_token = cp_lexer_peek_token (parser->lexer);
	  if (initialization_kind == CPP_EQ)
	    initializer = cp_parser_pure_specifier (parser);
	  else
	    {
	      /* If the declaration was erroneous, we don't really
		 know what the user intended, so just silently
		 consume the initializer.  */
	      if (decl != error_mark_node)
		error_at (initializer_start_token->location,
			  "initializer provided for function");
	      cp_parser_skip_to_closing_parenthesis (parser,
						     /*recovering=*/true,
						     /*or_comma=*/false,
						     /*consume_paren=*/true);
	    }
	}
      else
	{
	  /* We want to record the extra mangling scope for in-class
	     initializers of class members and initializers of static data
	     member templates.  The former involves deferring
	     parsing of the initializer until end of class as with default
	     arguments.  So right here we only handle the latter.  */
	  if (!member_p && processing_template_decl)
	    start_lambda_scope (decl);
	  initializer = cp_parser_initializer (parser,
					       &is_direct_init,
					       &is_non_constant_init);
	  if (!member_p && processing_template_decl)
	    finish_lambda_scope ();
	}
    }
  /* The old parser allows attributes to appear after a parenthesized
     initializer.  Mark Mitchell proposed removing this functionality
     on the GCC mailing lists on 2002-08-13.  This parser accepts the
     attributes -- but ignores them.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && initialization_kind == CPP_OPEN_PAREN)
    if (cp_parser_attributes_opt (parser))
      warning (OPT_Wattributes,
	       "attributes after parenthesized initializer ignored");
  /* For an in-class declaration, use `grokfield' to create the
     declaration.  */
  if (member_p)
    {
      if (pushed_scope)
	{
	  pop_scope (pushed_scope);
	  pushed_scope = NULL_TREE;
	}
      decl = grokfield (declarator, decl_specifiers,
			initializer, !is_non_constant_init,
			/*asmspec=*/NULL_TREE,
			prefix_attributes);
      if (decl && TREE_CODE (decl) == FUNCTION_DECL)
	cp_parser_save_default_args (parser, decl);
    }
  /* Finish processing the declaration.  But, skip member
     declarations.  */
  if (!member_p && decl && decl != error_mark_node && !range_for_decl_p)
    {
      cp_finish_decl (decl,
		      initializer, !is_non_constant_init,
		      asm_specification,
		      /* If the initializer is in parentheses, then this is
			 a direct-initialization, which means that an
			 `explicit' constructor is OK.  Otherwise, an
			 `explicit' constructor cannot be used.  */
		      ((is_direct_init || !is_initialized)
		       ? LOOKUP_NORMAL : LOOKUP_IMPLICIT));
    }
  else if ((cxx_dialect != cxx98) && friend_p
	   && decl && TREE_CODE (decl) == FUNCTION_DECL)
    /* Core issue #226 (C++0x only): A default template-argument
       shall not be specified in a friend class template
       declaration.  */
    check_default_tmpl_args (decl, current_template_parms, /*is_primary=*/1,
			     /*is_partial=*/0, /*is_friend_decl=*/1);
  /* NOTE(review): for friends the pushed scope is deliberately left
     in place here — presumably popped elsewhere; confirm before
     changing.  */
  if (!friend_p && pushed_scope)
    pop_scope (pushed_scope);
  return decl;
}
/* Parse a declarator.
declarator:
direct-declarator
ptr-operator declarator
abstract-declarator:
ptr-operator abstract-declarator [opt]
direct-abstract-declarator
GNU Extensions:
declarator:
attributes [opt] direct-declarator
attributes [opt] ptr-operator declarator
abstract-declarator:
attributes [opt] ptr-operator abstract-declarator [opt]
attributes [opt] direct-abstract-declarator
If CTOR_DTOR_OR_CONV_P is not NULL, *CTOR_DTOR_OR_CONV_P is used to
detect constructor, destructor or conversion operators. It is set
to -1 if the declarator is a name, and +1 if it is a
function. Otherwise it is set to zero. Usually you just want to
test for >0, but internally the negative value is used.
(The reason for CTOR_DTOR_OR_CONV_P is that a declaration must have
a decl-specifier-seq unless it declares a constructor, destructor,
or conversion. It might seem that we could check this condition in
semantic analysis, rather than parsing, but that makes it difficult
to handle something like `f()'. We want to notice that there are
no decl-specifiers, and therefore realize that this is an
expression, not a declaration.)
If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to true iff
the declarator is a direct-declarator of the form "(...)".
MEMBER_P is true iff this declarator is a member-declarator. */
static cp_declarator *
cp_parser_declarator (cp_parser* parser,
		      cp_parser_declarator_kind dcl_kind,
		      int* ctor_dtor_or_conv_p,
		      bool* parenthesized_p,
		      bool member_p)
{
  cp_declarator *result;
  enum tree_code ptr_code;
  cp_cv_quals quals;
  tree mem_class_type;
  tree gnu_attributes = NULL_TREE;

  /* Until proven otherwise, this is not a constructor, destructor,
     or type-conversion operator.  */
  if (ctor_dtor_or_conv_p)
    *ctor_dtor_or_conv_p = 0;

  /* GNU extension: attributes may precede the declarator proper.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    gnu_attributes = cp_parser_attributes_opt (parser);

  /* Tentatively look for a leading ptr-operator.  */
  cp_parser_parse_tentatively (parser);
  ptr_code = cp_parser_ptr_operator (parser,
				     &mem_class_type,
				     &quals);
  if (!cp_parser_parse_definitely (parser))
    {
      /* No ptr-operator; everything else is a direct-declarator.  */
      if (parenthesized_p)
	*parenthesized_p = cp_lexer_next_token_is (parser->lexer,
						   CPP_OPEN_PAREN);
      result = cp_parser_direct_declarator (parser, dcl_kind,
					    ctor_dtor_or_conv_p,
					    member_p);
    }
  else
    {
      /* A declarator that begins with a ptr-operator is certainly
	 not parenthesized.  */
      if (parenthesized_p)
	*parenthesized_p = true;
      /* In an abstract-declarator the declarator the ptr-operator
	 applies to may be omitted entirely, so parse it
	 tentatively.  */
      if (dcl_kind != CP_PARSER_DECLARATOR_NAMED)
	cp_parser_parse_tentatively (parser);
      /* Recurse for the declarator the ptr-operator applies to.  */
      result = cp_parser_declarator (parser, dcl_kind,
				     /*ctor_dtor_or_conv_p=*/NULL,
				     /*parenthesized_p=*/NULL,
				     /*member_p=*/false);
      if (dcl_kind != CP_PARSER_DECLARATOR_NAMED
	  && !cp_parser_parse_definitely (parser))
	result = NULL;
      /* Wrap the dependent declarator (possibly absent) in the
	 pointer, reference, or pointer-to-member.  */
      result = cp_parser_make_indirect_declarator
	(ptr_code, mem_class_type, quals, result);
    }

  if (gnu_attributes && result && result != cp_error_declarator)
    result->attributes = gnu_attributes;
  return result;
}
/* Parse a direct-declarator or direct-abstract-declarator.
direct-declarator:
declarator-id
direct-declarator ( parameter-declaration-clause )
cv-qualifier-seq [opt]
exception-specification [opt]
direct-declarator [ constant-expression [opt] ]
( declarator )
direct-abstract-declarator:
direct-abstract-declarator [opt]
( parameter-declaration-clause )
cv-qualifier-seq [opt]
exception-specification [opt]
direct-abstract-declarator [opt] [ constant-expression [opt] ]
( abstract-declarator )
Returns a representation of the declarator. DCL_KIND is
CP_PARSER_DECLARATOR_ABSTRACT, if we are parsing a
direct-abstract-declarator. It is CP_PARSER_DECLARATOR_NAMED, if
we are parsing a direct-declarator. It is
CP_PARSER_DECLARATOR_EITHER, if we can accept either - in the case
of ambiguity we prefer an abstract declarator, as per
[dcl.ambig.res]. CTOR_DTOR_OR_CONV_P and MEMBER_P are as for
cp_parser_declarator. */
static cp_declarator *
cp_parser_direct_declarator (cp_parser* parser,
			     cp_parser_declarator_kind dcl_kind,
			     int* ctor_dtor_or_conv_p,
			     bool member_p)
{
  cp_token *token;
  cp_declarator *declarator = NULL;
  tree scope = NULL_TREE;
  bool saved_default_arg_ok_p = parser->default_arg_ok_p;
  bool saved_in_declarator_p = parser->in_declarator_p;
  /* TRUE until the first (innermost) component of the declarator has
     been processed.  */
  bool first = true;
  tree pushed_scope = NULL_TREE;
  while (true)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_OPEN_PAREN)
	{
	  /* This is either a parameter-declaration-clause, or a
	     parenthesized declarator.  When we know we are parsing a
	     named declarator, it must be a parenthesized declarator
	     if FIRST is true.  For instance, `(int)' is a
	     parameter-declaration-clause, with an omitted
	     direct-abstract-declarator.  But `((*))', is a
	     parenthesized abstract declarator.  Finally, when T is a
	     template parameter `(T)' is a
	     parameter-declaration-clause, and not a parenthesized
	     named declarator.
	     We first try and parse a parameter-declaration-clause,
	     and then try a nested declarator (if FIRST is true).
	     It is not an error for it not to be a
	     parameter-declaration-clause, even when FIRST is
	     false.  Consider,
	       int i (int);
	       int i (3);
	     The first is the declaration of a function while the
	     second is the definition of a variable, including its
	     initializer.
	     Having seen only the parenthesis, we cannot know which of
	     these two alternatives should be selected.  Even more
	     complex are examples like:
	       int i (int (a));
	       int i (int (3));
	     The former is a function-declaration; the latter is a
	     variable initialization.
	     Thus again, we try a parameter-declaration-clause, and if
	     that fails, we back out and return.  */
	  if (!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
	    {
	      tree params;
	      unsigned saved_num_template_parameter_lists;
	      bool is_declarator = false;
	      tree t;
	      /* In a member-declarator, the only valid interpretation
		 of a parenthesis is the start of a
		 parameter-declaration-clause.  (It is invalid to
		 initialize a static data member with a parenthesized
		 initializer; only the "=" form of initialization is
		 permitted.)  */
	      if (!member_p)
		cp_parser_parse_tentatively (parser);
	      /* Consume the `('.  */
	      cp_lexer_consume_token (parser->lexer);
	      if (first)
		{
		  /* If this is going to be an abstract declarator, we're
		     in a declarator and we can't have default args.  */
		  parser->default_arg_ok_p = false;
		  parser->in_declarator_p = true;
		}
	      /* Inside the function parameter list, surrounding
		 template-parameter-lists do not apply.  */
	      saved_num_template_parameter_lists
		= parser->num_template_parameter_lists;
	      parser->num_template_parameter_lists = 0;
	      begin_scope (sk_function_parms, NULL_TREE);
	      /* Parse the parameter-declaration-clause.  */
	      params = cp_parser_parameter_declaration_clause (parser);
	      parser->num_template_parameter_lists
		= saved_num_template_parameter_lists;
	      /* Consume the `)'.  */
	      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	      /* If all went well, parse the cv-qualifier-seq and the
		 exception-specification.  */
	      if (member_p || cp_parser_parse_definitely (parser))
		{
		  cp_cv_quals cv_quals;
		  cp_virt_specifiers virt_specifiers;
		  tree exception_specification;
		  tree late_return;
		  is_declarator = true;
		  if (ctor_dtor_or_conv_p)
		    /* A parameter list after a ctor/dtor/conversion
		       name (recorded as -1) makes it definitely such
		       a function (+1); otherwise clear the flag.  */
		    *ctor_dtor_or_conv_p = *ctor_dtor_or_conv_p < 0;
		  first = false;
		  /* Parse the cv-qualifier-seq.  */
		  cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
		  /* And the exception-specification.  */
		  exception_specification
		    = cp_parser_exception_specification_opt (parser);
		  /* Parse the virt-specifier-seq.  */
		  virt_specifiers = cp_parser_virt_specifier_seq_opt (parser);
		  late_return = (cp_parser_late_return_type_opt
				 (parser, member_p ? cv_quals : -1));
		  /* Create the function-declarator.  */
		  declarator = make_call_declarator (declarator,
						     params,
						     cv_quals,
						     virt_specifiers,
						     exception_specification,
						     late_return);
		  /* Any subsequent parameter lists are to do with
		     return type, so are not those of the declared
		     function.  */
		  parser->default_arg_ok_p = false;
		}
	      /* Remove the function parms from scope.  */
	      for (t = current_binding_level->names; t; t = DECL_CHAIN (t))
		pop_binding (DECL_NAME (t), t);
	      leave_scope();
	      if (is_declarator)
		/* Repeat the main loop.  */
		continue;
	    }
	  /* If this is the first, we can try a parenthesized
	     declarator.  */
	  if (first)
	    {
	      bool saved_in_type_id_in_expr_p;
	      parser->default_arg_ok_p = saved_default_arg_ok_p;
	      parser->in_declarator_p = saved_in_declarator_p;
	      /* Consume the `('.  */
	      cp_lexer_consume_token (parser->lexer);
	      /* Parse the nested declarator.  */
	      saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	      parser->in_type_id_in_expr_p = true;
	      declarator
		= cp_parser_declarator (parser, dcl_kind, ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					member_p);
	      parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	      first = false;
	      /* Expect a `)'.  */
	      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
		declarator = cp_error_declarator;
	      if (declarator == cp_error_declarator)
		break;
	      goto handle_declarator;
	    }
	  /* Otherwise, we must be done.  */
	  else
	    break;
	}
      else if ((!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
	       && token->type == CPP_OPEN_SQUARE)
	{
	  /* Parse an array-declarator.  */
	  tree bounds;
	  if (ctor_dtor_or_conv_p)
	    *ctor_dtor_or_conv_p = 0;
	  first = false;
	  parser->default_arg_ok_p = false;
	  parser->in_declarator_p = true;
	  /* Consume the `['.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  /* If the next token is `]', then there is no
	     constant-expression.  */
	  if (token->type != CPP_CLOSE_SQUARE)
	    {
	      bool non_constant_p;
	      bounds
		= cp_parser_constant_expression (parser,
						 /*allow_non_constant=*/true,
						 &non_constant_p);
	      if (!non_constant_p)
		/* OK */;
	      else if (error_operand_p (bounds))
		/* Already gave an error.  */;
	      else if (!parser->in_function_body
		       || current_binding_level->kind == sk_function_parms)
		{
		  /* Normally, the array bound must be an integral constant
		     expression.  However, as an extension, we allow VLAs
		     in function scopes as long as they aren't part of a
		     parameter declaration.  */
		  cp_parser_error (parser,
				   "array bound is not an integer constant");
		  bounds = error_mark_node;
		}
	      else if (processing_template_decl)
		{
		  /* Remember this wasn't a constant-expression.  */
		  bounds = build_nop (TREE_TYPE (bounds), bounds);
		  TREE_SIDE_EFFECTS (bounds) = 1;
		}
	    }
	  else
	    bounds = NULL_TREE;
	  /* Look for the closing `]'.  */
	  if (!cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE))
	    {
	      declarator = cp_error_declarator;
	      break;
	    }
	  declarator = make_array_declarator (declarator, bounds);
	}
      else if (first && dcl_kind != CP_PARSER_DECLARATOR_ABSTRACT)
	{
	  {
	    tree qualifying_scope;
	    tree unqualified_name;
	    special_function_kind sfk;
	    bool abstract_ok;
	    bool pack_expansion_p = false;
	    cp_token *declarator_id_start_token;
	    /* Parse a declarator-id */
	    abstract_ok = (dcl_kind == CP_PARSER_DECLARATOR_EITHER);
	    if (abstract_ok)
	      {
		cp_parser_parse_tentatively (parser);
		/* If we see an ellipsis, we should be looking at a
		   parameter pack.  */
		if (token->type == CPP_ELLIPSIS)
		  {
		    /* Consume the `...' */
		    cp_lexer_consume_token (parser->lexer);
		    pack_expansion_p = true;
		  }
	      }
	    declarator_id_start_token = cp_lexer_peek_token (parser->lexer);
	    unqualified_name
	      = cp_parser_declarator_id (parser, /*optional_p=*/abstract_ok);
	    qualifying_scope = parser->scope;
	    if (abstract_ok)
	      {
		bool okay = false;
		if (!unqualified_name && pack_expansion_p)
		  {
		    /* Check whether an error occurred. */
		    okay = !cp_parser_error_occurred (parser);
		    /* We already consumed the ellipsis to mark a
		       parameter pack, but we have no way to report it,
		       so abort the tentative parse.  We will be exiting
		       immediately anyway. */
		    cp_parser_abort_tentative_parse (parser);
		  }
		else
		  okay = cp_parser_parse_definitely (parser);
		if (!okay)
		  unqualified_name = error_mark_node;
		else if (unqualified_name
			 && (qualifying_scope
			     || (TREE_CODE (unqualified_name)
				 != IDENTIFIER_NODE)))
		  {
		    cp_parser_error (parser, "expected unqualified-id");
		    unqualified_name = error_mark_node;
		  }
	      }
	    if (!unqualified_name)
	      return NULL;
	    if (unqualified_name == error_mark_node)
	      {
		declarator = cp_error_declarator;
		pack_expansion_p = false;
		declarator->parameter_pack_p = false;
		break;
	      }
	    if (qualifying_scope && at_namespace_scope_p ()
		&& TREE_CODE (qualifying_scope) == TYPENAME_TYPE)
	      {
		/* In the declaration of a member of a template class
		   outside of the class itself, the SCOPE will sometimes
		   be a TYPENAME_TYPE.  For example, given:
		     template <typename T>
		     int S<T>::R::i = 3;
		   the SCOPE will be a TYPENAME_TYPE for `S<T>::R'.  In
		   this context, we must resolve S<T>::R to an ordinary
		   type, rather than a typename type.
		   The reason we normally avoid resolving TYPENAME_TYPEs
		   is that a specialization of `S' might render
		   `S<T>::R' not a type.  However, if `S' is
		   specialized, then this `i' will not be used, so there
		   is no harm in resolving the types here.  */
		tree type;
		/* Resolve the TYPENAME_TYPE.  */
		type = resolve_typename_type (qualifying_scope,
					      /*only_current_p=*/false);
		/* If that failed, the declarator is invalid.  */
		if (TREE_CODE (type) == TYPENAME_TYPE)
		  {
		    if (typedef_variant_p (type))
		      error_at (declarator_id_start_token->location,
				"cannot define member of dependent typedef "
				"%qT", type);
		    else
		      error_at (declarator_id_start_token->location,
				"%<%T::%E%> is not a type",
				TYPE_CONTEXT (qualifying_scope),
				TYPE_IDENTIFIER (qualifying_scope));
		  }
		qualifying_scope = type;
	      }
	    sfk = sfk_none;
	    if (unqualified_name)
	      {
		tree class_type;
		if (qualifying_scope
		    && CLASS_TYPE_P (qualifying_scope))
		  class_type = qualifying_scope;
		else
		  class_type = current_class_type;
		if (TREE_CODE (unqualified_name) == TYPE_DECL)
		  {
		    tree name_type = TREE_TYPE (unqualified_name);
		    if (class_type && same_type_p (name_type, class_type))
		      {
			if (qualifying_scope
			    && CLASSTYPE_USE_TEMPLATE (name_type))
			  {
			    error_at (declarator_id_start_token->location,
				      "invalid use of constructor as a template");
			    inform (declarator_id_start_token->location,
				    "use %<%T::%D%> instead of %<%T::%D%> to "
				    "name the constructor in a qualified name",
				    class_type,
				    DECL_NAME (TYPE_TI_TEMPLATE (class_type)),
				    class_type, name_type);
			    declarator = cp_error_declarator;
			    break;
			  }
			else
			  unqualified_name = constructor_name (class_type);
		      }
		    else
		      {
			/* We do not attempt to print the declarator
			   here because we do not have enough
			   information about its original syntactic
			   form.  */
			cp_parser_error (parser, "invalid declarator");
			declarator = cp_error_declarator;
			break;
		      }
		  }
		if (class_type)
		  {
		    if (TREE_CODE (unqualified_name) == BIT_NOT_EXPR)
		      sfk = sfk_destructor;
		    else if (IDENTIFIER_TYPENAME_P (unqualified_name))
		      sfk = sfk_conversion;
		    else if (/* There's no way to declare a constructor
				for an anonymous type, even if the type
				got a name for linkage purposes.  */
			     !TYPE_WAS_ANONYMOUS (class_type)
			     && constructor_name_p (unqualified_name,
						    class_type))
		      {
			unqualified_name = constructor_name (class_type);
			sfk = sfk_constructor;
		      }
		    else if (is_overloaded_fn (unqualified_name)
			     && DECL_CONSTRUCTOR_P (get_first_fn
						    (unqualified_name)))
		      sfk = sfk_constructor;
		    if (ctor_dtor_or_conv_p && sfk != sfk_none)
		      *ctor_dtor_or_conv_p = -1;
		  }
	      }
	    declarator = make_id_declarator (qualifying_scope,
					     unqualified_name,
					     sfk);
	    declarator->id_loc = token->location;
	    declarator->parameter_pack_p = pack_expansion_p;
	    if (pack_expansion_p)
	      maybe_warn_variadic_templates ();
	  }
	/* Reached once the innermost declarator component (an id or a
	   parenthesized declarator) has been parsed.  */
	handle_declarator:;
	  scope = get_scope_of_declarator (declarator);
	  if (scope)
	    /* Any names that appear after the declarator-id for a
	       member are looked up in the containing scope.  */
	    pushed_scope = push_scope (scope);
	  parser->in_declarator_p = true;
	  if ((ctor_dtor_or_conv_p && *ctor_dtor_or_conv_p)
	      || (declarator && declarator->kind == cdk_id))
	    /* Default args are only allowed on function
	       declarations.  */
	    parser->default_arg_ok_p = saved_default_arg_ok_p;
	  else
	    parser->default_arg_ok_p = false;
	  first = false;
	}
      /* We're done.  */
      else
	break;
    }
  /* For an abstract declarator, we might wind up with nothing at this
     point.  That's an error; the declarator is not optional.  */
  if (!declarator)
    cp_parser_error (parser, "expected declarator");
  /* If we entered a scope, we must exit it now.  */
  if (pushed_scope)
    pop_scope (pushed_scope);
  parser->default_arg_ok_p = saved_default_arg_ok_p;
  parser->in_declarator_p = saved_in_declarator_p;
  return declarator;
}
/* Parse a ptr-operator.
ptr-operator:
* cv-qualifier-seq [opt]
&
:: [opt] nested-name-specifier * cv-qualifier-seq [opt]
GNU Extension:
ptr-operator:
& cv-qualifier-seq [opt]
Returns INDIRECT_REF if a pointer, or pointer-to-member, was used.
Returns ADDR_EXPR if a reference was used, or NON_LVALUE_EXPR for
an rvalue reference. In the case of a pointer-to-member, *TYPE is
filled in with the TYPE containing the member. *CV_QUALS is
filled in with the cv-qualifier-seq, or TYPE_UNQUALIFIED, if there
are no cv-qualifiers. Returns ERROR_MARK if an error occurred.
Note that the tree codes returned by this function have nothing
to do with the types of trees that will eventually be created
to represent the pointer or reference type being parsed. They are
just constants with suggestive names. */
static enum tree_code
cp_parser_ptr_operator (cp_parser* parser,
			tree* type,
			cp_cv_quals *cv_quals)
{
  enum tree_code code = ERROR_MARK;
  cp_token *token;
  /* Assume that it's not a pointer-to-member.  */
  *type = NULL_TREE;
  /* And that there are no cv-qualifiers.  */
  *cv_quals = TYPE_UNQUALIFIED;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's a `*', `&' or `&&' we have a pointer or reference.  */
  if (token->type == CPP_MULT)
    code = INDIRECT_REF;
  else if (token->type == CPP_AND)
    code = ADDR_EXPR;
  else if ((cxx_dialect != cxx98) &&
	   token->type == CPP_AND_AND) /* C++0x only */
    code = NON_LVALUE_EXPR;
  if (code != ERROR_MARK)
    {
      /* Consume the `*', `&' or `&&'.  */
      cp_lexer_consume_token (parser->lexer);
      /* A `*' can be followed by a cv-qualifier-seq, and so can a
	 `&', if we are allowing GNU extensions.  (The only qualifier
	 that can legally appear after `&' is `restrict', but that is
	 enforced during semantic analysis.)  */
      if (code == INDIRECT_REF
	  || cp_parser_allow_gnu_extensions_p (parser))
	*cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
    }
  else
    {
      /* Try the pointer-to-member case.  */
      cp_parser_parse_tentatively (parser);
      /* Look for the optional `::' operator.  */
      cp_parser_global_scope_opt (parser,
				  /*current_scope_valid_p=*/false);
      /* Look for the nested-name specifier.  */
      token = cp_lexer_peek_token (parser->lexer);
      cp_parser_nested_name_specifier (parser,
				       /*typename_keyword_p=*/false,
				       /*check_dependency_p=*/true,
				       /*type_p=*/false,
				       /*is_declaration=*/false);
      /* If we found it, and the next token is a `*', then we are
	 indeed looking at a pointer-to-member operator.  */
      if (!cp_parser_error_occurred (parser)
	  && cp_parser_require (parser, CPP_MULT, RT_MULT))
	{
	  /* Indicate that the `*' operator was used.  Even when the
	     scope is invalid (see the errors below) CODE stays
	     INDIRECT_REF so the caller can keep going.  */
	  code = INDIRECT_REF;
	  if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
	    error_at (token->location, "%qD is a namespace", parser->scope);
	  else if (TREE_CODE (parser->scope) == ENUMERAL_TYPE)
	    error_at (token->location, "cannot form pointer to member of "
		      "non-class %q#T", parser->scope);
	  else
	    {
	      /* The type of which the member is a member is given by the
		 current SCOPE.  */
	      *type = parser->scope;
	      /* The next name will not be qualified.  */
	      parser->scope = NULL_TREE;
	      parser->qualifying_scope = NULL_TREE;
	      parser->object_scope = NULL_TREE;
	      /* Look for the optional cv-qualifier-seq.  */
	      *cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
	    }
	}
      /* If that didn't work we don't have a ptr-operator.  */
      if (!cp_parser_parse_definitely (parser))
	cp_parser_error (parser, "expected ptr-operator");
    }
  return code;
}
/* Parse an (optional) cv-qualifier-seq.
cv-qualifier-seq:
cv-qualifier cv-qualifier-seq [opt]
cv-qualifier:
const
volatile
GNU Extension:
cv-qualifier:
__restrict__
Returns a bitmask representing the cv-qualifiers. */
static cp_cv_quals
cp_parser_cv_qualifier_seq_opt (cp_parser* parser)
{
  cp_cv_quals seen_quals = TYPE_UNQUALIFIED;
  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);
      cp_cv_quals qual;
      /* Map the keyword to its qualifier bit; anything else ends the
	 (possibly empty) sequence.  */
      if (tok->keyword == RID_CONST)
	qual = TYPE_QUAL_CONST;
      else if (tok->keyword == RID_VOLATILE)
	qual = TYPE_QUAL_VOLATILE;
      else if (tok->keyword == RID_RESTRICT)
	qual = TYPE_QUAL_RESTRICT;
      else
	break;
      if (seen_quals & qual)
	{
	  /* A repeated qualifier: complain and discard the token so
	     we do not trip over it again.  */
	  error_at (tok->location, "duplicate cv-qualifier");
	  cp_lexer_purge_token (parser->lexer);
	}
      else
	{
	  cp_lexer_consume_token (parser->lexer);
	  seen_quals |= qual;
	}
    }
  return seen_quals;
}
/* Parse an (optional) virt-specifier-seq.

   virt-specifier-seq:
     virt-specifier virt-specifier-seq [opt]

   virt-specifier:
     override
     final

   Returns a bitmask representing the virt-specifiers seen.  */
static cp_virt_specifiers
cp_parser_virt_specifier_seq_opt (cp_parser* parser)
{
  cp_virt_specifiers seen = VIRT_SPEC_UNSPECIFIED;

  while (true)
    {
      cp_token *tok;
      cp_virt_specifiers spec;
      const char *name;

      tok = cp_lexer_peek_token (parser->lexer);
      /* Both virt-specifiers are spelled as ordinary identifiers, not
         keywords; anything else ends the sequence.  */
      if (tok->type != CPP_NAME)
        break;

      name = IDENTIFIER_POINTER (tok->u.value);
      if (strcmp (name, "override") == 0)
        {
          maybe_warn_cpp0x (CPP0X_OVERRIDE_CONTROLS);
          spec = VIRT_SPEC_OVERRIDE;
        }
      else if (strcmp (name, "final") == 0)
        {
          maybe_warn_cpp0x (CPP0X_OVERRIDE_CONTROLS);
          spec = VIRT_SPEC_FINAL;
        }
      else if (strcmp (name, "__final") == 0)
        /* The GNU spelling, accepted without a C++11 warning.  */
        spec = VIRT_SPEC_FINAL;
      else
        break;

      if (seen & spec)
        {
          /* Diagnose the repetition, then discard the token.  */
          error_at (tok->location, "duplicate virt-specifier");
          cp_lexer_purge_token (parser->lexer);
        }
      else
        {
          cp_lexer_consume_token (parser->lexer);
          seen |= spec;
        }
    }

  return seen;
}
/* Used by handling of trailing-return-types and NSDMI, in which 'this'
   is in scope even though it isn't real.  Set up CURRENT_CLASS_PTR and
   CURRENT_CLASS_REF to describe a `this' of type CTYPE with
   cv-qualifiers QUALS.  */
static void
inject_this_parameter (tree ctype, cp_cv_quals quals)
{
  tree this_parm;

  if (current_class_ptr)
    {
      /* We don't clear this between NSDMIs.  Is it already what we
         want?  If the existing `this' already has the requested type
         and qualifiers, keep it.  */
      tree type = TREE_TYPE (TREE_TYPE (current_class_ptr));
      if (same_type_ignoring_top_level_qualifiers_p (ctype, type)
          && cp_type_quals (type) == quals)
        return;
    }

  /* Build a fake `this' parameter for CTYPE.  */
  this_parm = build_this_parm (ctype, quals);
  /* Clear this first to avoid shortcut in cp_build_indirect_ref.  */
  current_class_ptr = NULL_TREE;
  current_class_ref
    = cp_build_indirect_ref (this_parm, RO_NULL, tf_warning_or_error);
  current_class_ptr = this_parm;
}
/* Parse a late-specified return type, if any.  This is not a separate
   non-terminal, but part of a function declarator, which looks like

     -> trailing-type-specifier-seq abstract-declarator(opt)

   Returns the type indicated by the type-id, or NULL_TREE if no
   trailing return type is present.

   QUALS is either a bitmask of cv_qualifiers or -1 for a non-member
   function.  */
static tree
cp_parser_late_return_type_opt (cp_parser* parser, cp_cv_quals quals)
{
  cp_token *next;
  tree type;
  bool member_p;
  tree saved_ccp;
  tree saved_ccr;

  /* A late-specified return type is introduced by `->'; anything else
     means there is none.  */
  next = cp_lexer_peek_token (parser->lexer);
  if (next->type != CPP_DEREF)
    return NULL_TREE;

  /* Eat the `->'.  */
  cp_lexer_consume_token (parser->lexer);

  /* QUALS >= 0 means we are in a member function declarator.  */
  member_p = (quals >= 0);
  saved_ccp = current_class_ptr;
  saved_ccr = current_class_ref;
  if (member_p)
    /* DR 1207: 'this' is in scope in the trailing return type.  */
    inject_this_parameter (current_class_type, quals);

  type = cp_parser_trailing_type_id (parser);

  if (member_p)
    {
      /* Restore the `this' that inject_this_parameter replaced.  */
      current_class_ptr = saved_ccp;
      current_class_ref = saved_ccr;
    }

  return type;
}
/* Parse a declarator-id.

   declarator-id:
     id-expression
     :: [opt] nested-name-specifier [opt] type-name

   In the `id-expression' case the value returned is as for
   cp_parser_id_expression when the id-expression was an
   unqualified-id.  For a qualified-id a SCOPE_REF is returned: its
   first operand is the scope (either a NAMESPACE_DECL or TREE_TYPE),
   but its second operand is still just a representation of an
   unqualified-id.  */
static tree
cp_parser_declarator_id (cp_parser* parser, bool optional_p)
{
  tree result;

  /* The expression must be an id-expression.  Assume qualified names
     name types, and templates where required, so that both

       template <class T> int S<T>::R::i = 3;
       template <class T> int S<T>::R<T>::i = 3;

     parse: `S<T>::R' must be treated as the name of a type, and
     `R<T>' as a template-id.  */
  result = cp_parser_id_expression (parser,
                                    /*template_keyword_p=*/false,
                                    /*check_dependency_p=*/false,
                                    /*template_p=*/NULL,
                                    /*declarator_p=*/true,
                                    optional_p);
  /* Strip a BASELINK wrapper; only the functions inside matter here.  */
  if (result && BASELINK_P (result))
    result = BASELINK_FUNCTIONS (result);

  return result;
}
/* Parse a type-id.

   type-id:
     type-specifier-seq abstract-declarator [opt]

   Returns the TYPE specified, or error_mark_node on failure.  */
static tree
cp_parser_type_id_1 (cp_parser* parser, bool is_template_arg,
                     bool is_trailing_return)
{
  cp_decl_specifier_seq type_specifier_seq;
  cp_declarator *abstract_declarator;

  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
                                is_trailing_return,
                                &type_specifier_seq);
  if (type_specifier_seq.type == error_mark_node)
    return error_mark_node;

  /* The abstract-declarator is optional, so parse it tentatively.  */
  cp_parser_parse_tentatively (parser);
  abstract_declarator
    = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_ABSTRACT, NULL,
                            /*parenthesized_p=*/NULL,
                            /*member_p=*/false);
  /* If the tentative parse failed, there was no declarator at all.  */
  if (!cp_parser_parse_definitely (parser))
    abstract_declarator = NULL;

  if (type_specifier_seq.type
      && type_uses_auto (type_specifier_seq.type))
    {
      /* A type-id with type `auto' is only OK when the abstract
         declarator is a function declarator with a late-specified
         return type.  */
      bool auto_ok = (abstract_declarator
                      && abstract_declarator->kind == cdk_function
                      && abstract_declarator->u.function.late_return_type);
      if (!auto_ok)
        {
          error ("invalid use of %<auto%>");
          return error_mark_node;
        }
    }

  return groktypename (&type_specifier_seq, abstract_declarator,
                       is_template_arg);
}
/* Parse an ordinary type-id; see cp_parser_type_id_1.  */
static tree cp_parser_type_id (cp_parser *parser)
{
  return cp_parser_type_id_1 (parser, /*is_template_arg=*/false,
                              /*is_trailing_return=*/false);
}
/* Parse a type-id used as a template argument; defining a type inside
   a template argument list is forbidden while doing so.  */
static tree cp_parser_template_type_arg (cp_parser *parser)
{
  const char *saved_message = parser->type_definition_forbidden_message;
  tree result;

  parser->type_definition_forbidden_message
    = G_("types may not be defined in template arguments");
  result = cp_parser_type_id_1 (parser, /*is_template_arg=*/true,
                                /*is_trailing_return=*/false);
  parser->type_definition_forbidden_message = saved_message;
  return result;
}
/* Parse the type-id of a trailing-return-type.  */
static tree cp_parser_trailing_type_id (cp_parser *parser)
{
  return cp_parser_type_id_1 (parser, /*is_template_arg=*/false,
                              /*is_trailing_return=*/true);
}
/* Parse a type-specifier-seq.

   type-specifier-seq:
     type-specifier type-specifier-seq [opt]

   GNU extension:

   type-specifier-seq:
     attributes type-specifier-seq [opt]

   If IS_DECLARATION is true, we are at the start of a "condition" or
   exception-declaration, so we might be followed by a declarator-id.

   If IS_TRAILING_RETURN is true, we are in a trailing-return-type,
   i.e. we've just seen "->".

   Sets *TYPE_SPECIFIER_SEQ to represent the sequence.  On failure to
   find even one type-specifier, its TYPE is error_mark_node.  */
static void
cp_parser_type_specifier_seq (cp_parser* parser,
                              bool is_declaration,
                              bool is_trailing_return,
                              cp_decl_specifier_seq *type_specifier_seq)
{
  bool seen_type_specifier = false;
  /* Every type-specifier after the first is optional; a missing one
     simply terminates the sequence.  */
  cp_parser_flags flags = CP_PARSER_FLAGS_OPTIONAL;
  cp_token *start_token = NULL;

  /* Clear the TYPE_SPECIFIER_SEQ.  */
  clear_decl_specs (type_specifier_seq);

  /* In the context of a trailing return type, enum E { } is an
     elaborated-type-specifier followed by a function-body, not an
     enum-specifier.  */
  if (is_trailing_return)
    flags |= CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS;

  /* Parse the type-specifiers and attributes.  */
  while (true)
    {
      tree type_specifier;
      bool is_cv_qualifier;

      /* Check for attributes first.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
        {
          /* GNU attributes are simply chained onto what has been
             collected so far; they do not count as type-specifiers.  */
          type_specifier_seq->attributes =
            chainon (type_specifier_seq->attributes,
                     cp_parser_attributes_opt (parser));
          continue;
        }

      /* Record the token of the beginning of the type specifier seq,
         for error reporting purposes.  */
      if (!start_token)
        start_token = cp_lexer_peek_token (parser->lexer);

      /* Look for the type-specifier.  */
      type_specifier = cp_parser_type_specifier (parser,
                                                 flags,
                                                 type_specifier_seq,
                                                 /*is_declaration=*/false,
                                                 NULL,
                                                 &is_cv_qualifier);
      if (!type_specifier)
        {
          /* If the first type-specifier could not be found, this is not a
             type-specifier-seq at all.  */
          if (!seen_type_specifier)
            {
              cp_parser_error (parser, "expected type-specifier");
              type_specifier_seq->type = error_mark_node;
              return;
            }
          /* If subsequent type-specifiers could not be found, the
             type-specifier-seq is complete.  */
          break;
        }

      seen_type_specifier = true;
      /* The standard says that a condition can be:

           type-specifier-seq declarator = assignment-expression

         However, given:

           struct S {};
           if (int S = ...)

         we should treat the "S" as a declarator, not as a
         type-specifier.  The standard doesn't say that explicitly for
         type-specifier-seq, but it does say that for
         decl-specifier-seq in an ordinary declaration.  Perhaps it
         would be clearer just to allow a decl-specifier-seq here, and
         then add a semantic restriction that if any decl-specifiers
         that are not type-specifiers appear, the program is invalid.  */
      if (is_declaration && !is_cv_qualifier)
        flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES;
    }

  /* START_TOKEN cannot be NULL here: reaching this point requires
     SEEN_TYPE_SPECIFIER, which is set only after START_TOKEN is.  */
  cp_parser_check_decl_spec (type_specifier_seq, start_token->location);
}
/* Parse a parameter-declaration-clause.

   parameter-declaration-clause:
     parameter-declaration-list [opt] ... [opt]
     parameter-declaration-list , ...

   Returns a representation for the parameter declarations.  A return
   value of NULL indicates a parameter-declaration-clause consisting
   only of an ellipsis (or, after an error, an erroneous clause).  A
   non-variadic clause is terminated with void_list_node.  */
static tree
cp_parser_parameter_declaration_clause (cp_parser* parser)
{
  tree parameters;
  cp_token *token;
  bool ellipsis_p;
  bool is_error;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Check for trivial parameter-declaration-clauses.  */
  if (token->type == CPP_ELLIPSIS)
    {
      /* A lone `...' means a varargs function with no named
         parameters.  Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      return NULL_TREE;
    }
  else if (token->type == CPP_CLOSE_PAREN)
    /* There are no parameters.  */
    {
#ifndef NO_IMPLICIT_EXTERN_C
      /* In an implicit extern "C" context (a system header outside any
         class), `()' means an unprototyped function, as in C.  */
      if (in_system_header && current_class_type == NULL
          && current_lang_name == lang_name_c)
        return NULL_TREE;
      else
#endif
        return void_list_node;
    }
  /* Check for `(void)', too, which is a special case.  */
  else if (token->keyword == RID_VOID
           && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
               == CPP_CLOSE_PAREN))
    {
      /* Consume the `void' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* There are no parameters.  */
      return void_list_node;
    }

  /* Parse the parameter-declaration-list.  */
  parameters = cp_parser_parameter_declaration_list (parser, &is_error);
  /* If a parse error occurred while parsing the
     parameter-declaration-list, then the entire
     parameter-declaration-clause is erroneous.  */
  if (is_error)
    return NULL;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's a `,', the clause should terminate with an ellipsis.  */
  if (token->type == CPP_COMMA)
    {
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
      /* Expect an ellipsis.  */
      ellipsis_p
        = (cp_parser_require (parser, CPP_ELLIPSIS, RT_ELLIPSIS) != NULL);
    }
  /* It might also be `...' if the optional trailing `,' was
     omitted.  */
  else if (token->type == CPP_ELLIPSIS)
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* And remember that we saw it.  */
      ellipsis_p = true;
    }
  else
    ellipsis_p = false;

  /* Finish the parameter list.  */
  if (!ellipsis_p)
    /* Terminate with void_list_node to mark a prototyped,
       non-variadic list.  */
    parameters = chainon (parameters, void_list_node);

  return parameters;
}
/* Parse a parameter-declaration-list.

   parameter-declaration-list:
     parameter-declaration
     parameter-declaration-list , parameter-declaration

   Returns a representation of the parameter-declaration-list, as for
   cp_parser_parameter_declaration_clause.  However, the
   `void_list_node' is never appended to the list.  Upon return,
   *IS_ERROR will be true iff an error occurred.  */
static tree
cp_parser_parameter_declaration_list (cp_parser* parser, bool *is_error)
{
  tree parameters = NULL_TREE;
  /* Tail pointer, so each parameter is appended in O(1) without
     re-walking the list.  (The `&' characters in this function had
     been corrupted into `&para;' HTML entities; restored.)  */
  tree *tail = &parameters;
  bool saved_in_unbraced_linkage_specification_p;
  int index = 0;

  /* Assume all will go well.  */
  *is_error = false;

  /* The special considerations that apply to a function within an
     unbraced linkage specifications do not apply to the parameters
     to the function.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;

  /* Look for more parameters.  */
  while (true)
    {
      cp_parameter_declarator *parameter;
      tree decl = error_mark_node;
      bool parenthesized_p = false;

      /* Parse the parameter.  */
      parameter
        = cp_parser_parameter_declaration (parser,
                                           /*template_parm_p=*/false,
                                           &parenthesized_p);

      /* We don't know yet if the enclosing context is deprecated, so wait
         and warn in grokparms if appropriate.  */
      deprecated_state = DEPRECATED_SUPPRESS;

      if (parameter)
        decl = grokdeclarator (parameter->declarator,
                               &parameter->decl_specifiers,
                               PARM,
                               parameter->default_argument != NULL_TREE,
                               &parameter->decl_specifiers.attributes);

      deprecated_state = DEPRECATED_NORMAL;

      /* If a parse error occurred parsing the parameter declaration,
         then the entire parameter-declaration-list is erroneous.  */
      if (decl == error_mark_node)
        {
          *is_error = true;
          parameters = error_mark_node;
          break;
        }

      if (parameter->decl_specifiers.attributes)
        cplus_decl_attributes (&decl,
                               parameter->decl_specifiers.attributes,
                               0);
      if (DECL_NAME (decl))
        decl = pushdecl (decl);

      if (decl != error_mark_node)
        {
          retrofit_lang_decl (decl);
          DECL_PARM_INDEX (decl) = ++index;
          DECL_PARM_LEVEL (decl) = function_parm_depth ();
        }

      /* Add the new parameter to the list.  */
      *tail = build_tree_list (parameter->default_argument, decl);
      tail = &TREE_CHAIN (*tail);

      /* Peek at the next token.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)
          || cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)
          /* These are for Objective-C++ */
          || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
          || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
        /* The parameter-declaration-list is complete.  */
        break;
      else if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
        {
          cp_token *token;

          /* Peek at the next token.  */
          token = cp_lexer_peek_nth_token (parser->lexer, 2);
          /* If it's an ellipsis, then the list is complete.  */
          if (token->type == CPP_ELLIPSIS)
            break;
          /* Otherwise, there must be more parameters.  Consume the
             `,'.  */
          cp_lexer_consume_token (parser->lexer);
          /* When parsing something like:

               int i(float f, double d)

             we can tell after seeing the declaration for "f" that we
             are not looking at an initialization of a variable "i",
             but rather at the declaration of a function "i".

             Due to the fact that the parsing of template arguments
             (as specified to a template-id) requires backtracking we
             cannot use this technique when inside a template argument
             list.  */
          if (!parser->in_template_argument_list_p
              && !parser->in_type_id_in_expr_p
              && cp_parser_uncommitted_to_tentative_parse_p (parser)
              /* However, a parameter-declaration of the form
                 "float(f)" (which is a valid declaration of a
                 parameter "f") can also be interpreted as an
                 expression (the conversion of "f" to "float").  */
              && !parenthesized_p)
            cp_parser_commit_to_tentative_parse (parser);
        }
      else
        {
          cp_parser_error (parser, "expected %<,%> or %<...%>");
          if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
            cp_parser_skip_to_closing_parenthesis (parser,
                                                   /*recovering=*/true,
                                                   /*or_comma=*/false,
                                                   /*consume_paren=*/false);
          break;
        }
    }

  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;

  return parameters;
}
/* Parse a parameter declaration.

   parameter-declaration:
     decl-specifier-seq ... [opt] declarator
     decl-specifier-seq declarator = assignment-expression
     decl-specifier-seq ... [opt] abstract-declarator [opt]
     decl-specifier-seq abstract-declarator [opt] = assignment-expression

   If TEMPLATE_PARM_P is TRUE, then this parameter-declaration
   declares a template parameter.  (In that case, a non-nested `>'
   token encountered during the parsing of the assignment-expression
   is not interpreted as a greater-than operator.)

   Returns a representation of the parameter, or NULL if an error
   occurs.  If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to
   true iff the declarator is of the form "(p)".  */
static cp_parameter_declarator *
cp_parser_parameter_declaration (cp_parser *parser,
                                 bool template_parm_p,
                                 bool *parenthesized_p)
{
  int declares_class_or_enum;
  cp_decl_specifier_seq decl_specifiers;
  cp_declarator *declarator;
  tree default_argument;
  cp_token *token = NULL, *declarator_token_start = NULL;
  const char *saved_message;

  /* In a template parameter, `>' is not an operator.

     [temp.param]

     When parsing a default template-argument for a non-type
     template-parameter, the first non-nested `>' is taken as the end
     of the template parameter-list rather than a greater-than
     operator.  */

  /* Type definitions may not appear in parameter types.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in parameter types");

  /* Parse the declaration-specifiers.  */
  cp_parser_decl_specifier_seq (parser,
                                CP_PARSER_FLAGS_NONE,
                                &decl_specifiers,
                                &declares_class_or_enum);

  /* Complain about missing 'typename' or other invalid type names.  */
  if (!decl_specifiers.any_type_specifiers_p)
    cp_parser_parse_and_diagnose_invalid_type_name (parser);

  /* If an error occurred, there's no reason to attempt to parse the
     rest of the declaration.  Restore the forbidden-message first.  */
  if (cp_parser_error_occurred (parser))
    {
      parser->type_definition_forbidden_message = saved_message;
      return NULL;
    }

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is a `)', `,', `=', `>', or `...', then there
     is no declarator.  However, when variadic templates are enabled,
     there may be a declarator following `...'.  */
  if (token->type == CPP_CLOSE_PAREN
      || token->type == CPP_COMMA
      || token->type == CPP_EQ
      || token->type == CPP_GREATER)
    {
      declarator = NULL;
      if (parenthesized_p)
        *parenthesized_p = false;
    }
  /* Otherwise, there should be a declarator.  */
  else
    {
      /* Default arguments are not allowed inside the declarator
         itself; save and restore the flag around its parse.  */
      bool saved_default_arg_ok_p = parser->default_arg_ok_p;
      parser->default_arg_ok_p = false;

      /* After seeing a decl-specifier-seq, if the next token is not a
         "(", there is no possibility that the code is a valid
         expression.  Therefore, if parsing tentatively, we commit at
         this point.  */
      if (!parser->in_template_argument_list_p
          /* In an expression context, having seen:

               (int((char ...

             we cannot be sure whether we are looking at a
             function-type (taking a "char" as a parameter) or a cast
             of some object of type "char" to "int".  */
          && !parser->in_type_id_in_expr_p
          && cp_parser_uncommitted_to_tentative_parse_p (parser)
          && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
          && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
        cp_parser_commit_to_tentative_parse (parser);
      /* Parse the declarator.  */
      declarator_token_start = token;
      declarator = cp_parser_declarator (parser,
                                         CP_PARSER_DECLARATOR_EITHER,
                                         /*ctor_dtor_or_conv_p=*/NULL,
                                         parenthesized_p,
                                         /*member_p=*/false);
      parser->default_arg_ok_p = saved_default_arg_ok_p;
      /* After the declarator, allow more attributes.  */
      decl_specifiers.attributes
        = chainon (decl_specifiers.attributes,
                   cp_parser_attributes_opt (parser));
    }

  /* If the next token is an ellipsis, and we have not seen a
     declarator name, and the type of the declarator contains parameter
     packs but it is not a TYPE_PACK_EXPANSION, then we actually have
     a parameter pack expansion expression.  Otherwise, leave the
     ellipsis for a C-style variadic function.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      tree type = decl_specifiers.type;

      if (type && DECL_P (type))
        type = TREE_TYPE (type);

      if (type
          && TREE_CODE (type) != TYPE_PACK_EXPANSION
          && declarator_can_be_parameter_pack (declarator)
          && (!declarator || !declarator->parameter_pack_p)
          && uses_parameter_packs (type))
        {
          /* Consume the `...'. */
          cp_lexer_consume_token (parser->lexer);
          maybe_warn_variadic_templates ();

          /* Build a pack expansion type */
          if (declarator)
            declarator->parameter_pack_p = true;
          else
            decl_specifiers.type = make_pack_expansion (type);
        }
    }

  /* The restriction on defining new types applies only to the type
     of the parameter, not to the default argument.  */
  parser->type_definition_forbidden_message = saved_message;

  /* If the next token is `=', then process a default argument.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      token = cp_lexer_peek_token (parser->lexer);
      /* If we are defining a class, then the tokens that make up the
         default argument must be saved and processed later.  */
      if (!template_parm_p && at_class_scope_p ()
          && TYPE_BEING_DEFINED (current_class_type)
          && !LAMBDA_TYPE_P (current_class_type))
        default_argument = cp_parser_cache_defarg (parser, /*nsdmi=*/false);
      /* Outside of a class definition, we can just parse the
         assignment-expression.  */
      else
        default_argument
          = cp_parser_default_argument (parser, template_parm_p);

      if (!parser->default_arg_ok_p)
        {
          /* Default arguments are not permitted here; -fpermissive
             downgrades the diagnostic to a warning.  */
          if (flag_permissive)
            warning (0, "deprecated use of default argument for parameter of non-function");
          else
            {
              error_at (token->location,
                        "default arguments are only "
                        "permitted for function parameters");
              default_argument = NULL_TREE;
            }
        }
      else if ((declarator && declarator->parameter_pack_p)
               || (decl_specifiers.type
                   && PACK_EXPANSION_P (decl_specifiers.type)))
        {
          /* A parameter pack may never have a default argument.
             Find the name of the parameter pack for the diagnostic.  */
          cp_declarator *id_declarator = declarator;
          while (id_declarator && id_declarator->kind != cdk_id)
            id_declarator = id_declarator->declarator;

          if (id_declarator && id_declarator->kind == cdk_id)
            error_at (declarator_token_start->location,
                      template_parm_p
                      ? G_("template parameter pack %qD "
                           "cannot have a default argument")
                      : G_("parameter pack %qD cannot have "
                           "a default argument"),
                      id_declarator->u.id.unqualified_name);
          else
            error_at (declarator_token_start->location,
                      template_parm_p
                      ? G_("template parameter pack cannot have "
                           "a default argument")
                      : G_("parameter pack cannot have a "
                           "default argument"));

          default_argument = NULL_TREE;
        }
    }
  else
    default_argument = NULL_TREE;

  return make_parameter_declarator (&decl_specifiers,
                                    declarator,
                                    default_argument);
}
/* Parse a default argument and return it.

   TEMPLATE_PARM_P is true if this is a default argument for a
   non-type template parameter.  */
static tree
cp_parser_default_argument (cp_parser *parser, bool template_parm_p)
{
  tree defarg = NULL_TREE;
  bool old_greater_p;
  bool old_locals_forbidden_p;
  bool non_constant_p, is_direct_init;

  /* Within a default template-argument a non-nested `>' ends the
     template parameter-list; everywhere else it is the greater-than
     operator.  Make sure PARSER->GREATER_THAN_IS_OPERATOR_P is set
     accordingly.  */
  old_greater_p = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = !template_parm_p;

  /* Local variable names (and the `this' keyword) may not appear in a
     default argument.  */
  old_locals_forbidden_p = parser->local_variables_forbidden_p;
  parser->local_variables_forbidden_p = true;

  if (template_parm_p)
    push_deferring_access_checks (dk_no_deferred);

  /* Parse the assignment-expression (or braced-init-list).  */
  defarg = cp_parser_initializer (parser, &is_direct_init, &non_constant_p);
  if (BRACE_ENCLOSED_INITIALIZER_P (defarg))
    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);

  if (template_parm_p)
    pop_deferring_access_checks ();

  /* Put the parser state back the way we found it.  */
  parser->greater_than_is_operator_p = old_greater_p;
  parser->local_variables_forbidden_p = old_locals_forbidden_p;

  return defarg;
}
/* Parse a function-body.

   function-body:
     compound_statement  */
static void
cp_parser_function_body (cp_parser *parser)
{
  /* The second argument is the statement-expression context (none
     here); the two booleans mark "not in a try block" and "this is a
     function body" -- confirm names against the callee's prototype.  */
  cp_parser_compound_statement (parser, NULL, false, true);
}
/* Parse a ctor-initializer-opt followed by a function-body.  Return
   true if a ctor-initializer was present.  */
static bool
cp_parser_ctor_initializer_opt_and_function_body (cp_parser *parser)
{
  tree body, list;
  bool ctor_initializer_p;
  /* Only a constexpr constructor needs its body checked for
     (near-)emptiness.  */
  const bool check_body_p =
     DECL_CONSTRUCTOR_P (current_function_decl)
     && DECL_DECLARED_CONSTEXPR_P (current_function_decl);
  tree last = NULL;

  /* Begin the function body.  */
  body = begin_function_body ();
  /* Parse the optional ctor-initializer.  */
  ctor_initializer_p = cp_parser_ctor_initializer_opt (parser);

  /* If we're parsing a constexpr constructor definition, we need
     to check that the constructor body is indeed empty.  However,
     before we get to cp_parser_function_body lot of junk has been
     generated, so we can't just check that we have an empty block.
     Rather we take a snapshot of the outermost block, and check whether
     cp_parser_function_body changed its state.  */
  if (check_body_p)
    {
      /* Snapshot the current tail of the statement list.  */
      list = cur_stmt_list;
      if (STATEMENT_LIST_TAIL (list))
        last = STATEMENT_LIST_TAIL (list)->stmt;
    }
  /* Parse the function-body.  */
  cp_parser_function_body (parser);
  if (check_body_p)
    /* Diagnose anything appended past the snapshot point.  */
    check_constexpr_ctor_body (last, list);
  /* Finish the function body.  */
  finish_function_body (body);

  return ctor_initializer_p;
}
/* Parse an initializer.

   initializer:
     = initializer-clause
     ( expression-list )

   Returns an expression representing the initializer.  If no
   initializer is present, NULL_TREE is returned.

   *IS_DIRECT_INIT is set to FALSE if the `= initializer-clause'
   production is used, and TRUE otherwise.  *IS_DIRECT_INIT is
   set to TRUE if there is no initializer present.  If there is an
   initializer, and it is not a constant-expression, *NON_CONSTANT_P
   is set to true; otherwise it is set to false.  */
static tree
cp_parser_initializer (cp_parser* parser, bool* is_direct_init,
                       bool* non_constant_p)
{
  cp_token *token;
  tree init;

  token = cp_lexer_peek_token (parser->lexer);

  /* Only the `= initializer-clause' form is copy-initialization.  */
  *is_direct_init = (token->type != CPP_EQ);
  /* Assume that the initializer is constant.  */
  *non_constant_p = false;

  switch (token->type)
    {
    case CPP_EQ:
      /* Skip the `=' and parse the initializer-clause.  */
      cp_lexer_consume_token (parser->lexer);
      init = cp_parser_initializer_clause (parser, non_constant_p);
      break;

    case CPP_OPEN_PAREN:
      {
        /* Direct-initialization: `( expression-list )'.  */
        VEC(tree,gc) *exprs
          = cp_parser_parenthesized_expression_list (parser, non_attr,
                                                     /*cast_p=*/false,
                                                     /*allow_expansion_p=*/true,
                                                     non_constant_p);
        if (exprs == NULL)
          return error_mark_node;
        init = build_tree_list_vec (exprs);
        release_tree_vector (exprs);
      }
      break;

    case CPP_OPEN_BRACE:
      /* C++11 braced-init-list.  */
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      init = cp_parser_braced_list (parser, non_constant_p);
      CONSTRUCTOR_IS_DIRECT_INIT (init) = 1;
      break;

    default:
      /* Anything else is an error.  */
      cp_parser_error (parser, "expected initializer");
      init = error_mark_node;
      break;
    }

  return init;
}
/* Parse an initializer-clause.

   initializer-clause:
     assignment-expression
     braced-init-list

   Returns an expression representing the initializer.

   If the `assignment-expression' production is used the value
   returned is simply a representation for the expression.

   Otherwise, calls cp_parser_braced_list.  */
static tree
cp_parser_initializer_clause (cp_parser* parser, bool* non_constant_p)
{
  tree init;

  /* Until shown otherwise, assume the clause is constant.  */
  *non_constant_p = false;

  /* A leading `{' means a braced-init-list; anything else is an
     assignment-expression.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    init = cp_parser_braced_list (parser, non_constant_p);
  else
    init = cp_parser_constant_expression (parser,
                                          /*allow_non_constant_p=*/true,
                                          non_constant_p);

  return init;
}
/* Parse a brace-enclosed initializer list.

   braced-init-list:
     { initializer-list , [opt] }
     { }

   Returns a CONSTRUCTOR.  The CONSTRUCTOR_ELTS will be the elements
   of the initializer-list (or NULL, if the last production is used)
   and its TREE_TYPE is init_list_type_node.  There is no way to
   detect whether or not the optional trailing `,' was provided.
   NON_CONSTANT_P is as for cp_parser_initializer.  */
static tree
cp_parser_braced_list (cp_parser* parser, bool* non_constant_p)
{
  tree ctor;

  /* Eat the `{'.  */
  cp_lexer_consume_token (parser->lexer);
  /* The braced-initializer is represented as a CONSTRUCTOR node.  */
  ctor = make_node (CONSTRUCTOR);
  /* An immediate `}' means the list is empty.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
    {
      /* Parse the initializer list proper.  */
      CONSTRUCTOR_ELTS (ctor)
        = cp_parser_initializer_list (parser, non_constant_p);
      /* Permit (and discard) a trailing `,'.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
        cp_lexer_consume_token (parser->lexer);
    }
  /* Require the closing `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
  TREE_TYPE (ctor) = init_list_type_node;
  return ctor;
}
/* Parse an initializer-list.

   initializer-list:
     initializer-clause ... [opt]
     initializer-list , initializer-clause ... [opt]

   GNU Extension:

   initializer-list:
     designation initializer-clause ...[opt]
     initializer-list , designation initializer-clause ...[opt]

   designation:
     . identifier =
     identifier :
     [ constant-expression ] =

   Returns a VEC of constructor_elt.  The VALUE of each elt is an expression
   for the initializer.  If the INDEX of the elt is non-NULL, it is the
   IDENTIFIER_NODE naming the field to initialize.  NON_CONSTANT_P is
   as for cp_parser_initializer.  */
static VEC(constructor_elt,gc) *
cp_parser_initializer_list (cp_parser* parser, bool* non_constant_p)
{
  VEC(constructor_elt,gc) *v = NULL;

  /* Assume all of the expressions are constant.  */
  *non_constant_p = false;

  /* Parse the rest of the list.  */
  while (true)
    {
      cp_token *token;
      tree designator;
      tree initializer;
      bool clause_non_constant_p;

      /* If the next token is an identifier and the following one is a
         colon, we are looking at the GNU designated-initializer
         syntax.  */
      if (cp_parser_allow_gnu_extensions_p (parser)
          && cp_lexer_next_token_is (parser->lexer, CPP_NAME)
          && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON)
        {
          /* Warn the user that they are using an extension.  */
          pedwarn (input_location, OPT_pedantic,
                   "ISO C++ does not allow designated initializers");
          /* Consume the identifier.  */
          designator = cp_lexer_consume_token (parser->lexer)->u.value;
          /* Consume the `:'.  */
          cp_lexer_consume_token (parser->lexer);
        }
      /* Also handle the C99 syntax, '. id ='.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
               && cp_lexer_next_token_is (parser->lexer, CPP_DOT)
               && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_NAME
               && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_EQ)
        {
          /* Warn the user that they are using an extension.  */
          pedwarn (input_location, OPT_pedantic,
                   "ISO C++ does not allow C99 designated initializers");
          /* Consume the `.'.  */
          cp_lexer_consume_token (parser->lexer);
          /* Consume the identifier.  */
          designator = cp_lexer_consume_token (parser->lexer)->u.value;
          /* Consume the `='.  */
          cp_lexer_consume_token (parser->lexer);
        }
      /* Also handle C99 array designators, '[ const ] ='.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
               && !c_dialect_objc ()
               && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
        {
          /* In C++11, [ could start a lambda-introducer, so parse the
             designator tentatively; on failure the `[' is left for
             the initializer-clause itself.  */
          bool non_const = false;

          cp_parser_parse_tentatively (parser);
          cp_lexer_consume_token (parser->lexer);
          designator = cp_parser_constant_expression (parser, true, &non_const);
          cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
          cp_parser_require (parser, CPP_EQ, RT_EQ);
          if (!cp_parser_parse_definitely (parser))
            designator = NULL_TREE;
          else if (non_const)
            require_potential_rvalue_constant_expression (designator);
        }
      else
        designator = NULL_TREE;

      /* Parse the initializer.  */
      initializer = cp_parser_initializer_clause (parser,
                                                  &clause_non_constant_p);
      /* If any clause is non-constant, so is the entire initializer.  */
      if (clause_non_constant_p)
        *non_constant_p = true;

      /* If we have an ellipsis, this is an initializer pack
         expansion.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
        {
          /* Consume the `...'.  */
          cp_lexer_consume_token (parser->lexer);

          /* Turn the initializer into an initializer expansion.  */
          initializer = make_pack_expansion (initializer);
        }

      /* Add it to the vector.  */
      CONSTRUCTOR_APPEND_ELT (v, designator, initializer);

      /* If the next token is not a comma, we have reached the end of
         the list.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
        break;

      /* Peek at the next token.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* If the next token is a `}', then we're still done.  An
         initializer-clause can have a trailing `,' after the
         initializer-list and before the closing `}'.  */
      if (token->type == CPP_CLOSE_BRACE)
        break;

      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return v;
}
/* Classes [gram.class] */
/* Parse a class-name.

   class-name:
     identifier
     template-id

   TYPENAME_KEYWORD_P is true iff the `typename' keyword has been used
   to indicate that names looked up in dependent types should be
   assumed to be types.  TEMPLATE_KEYWORD_P is true iff the `template'
   keyword has been used to indicate that the name that appears next
   is a template.  TAG_TYPE indicates the explicit tag given before
   the type name, if any.  If CHECK_DEPENDENCY_P is FALSE, names are
   looked up in dependent scopes.  If CLASS_HEAD_P is TRUE, this class
   is the class being defined in a class-head.  IS_DECLARATION is
   forwarded to cp_parser_template_id when a template-id must be
   parsed.

   Returns the TYPE_DECL representing the class, or error_mark_node
   (after reporting or simulating an error) if no class-name is
   present.  */

static tree
cp_parser_class_name (cp_parser *parser,
		      bool typename_keyword_p,
		      bool template_keyword_p,
		      enum tag_types tag_type,
		      bool check_dependency_p,
		      bool class_head_p,
		      bool is_declaration)
{
  tree decl;
  tree scope;
  bool typename_p;
  cp_token *token;
  tree identifier = NULL_TREE;

  /* All class-names start with an identifier (a plain CPP_NAME or a
     pre-formed CPP_TEMPLATE_ID token).  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME && token->type != CPP_TEMPLATE_ID)
    {
      cp_parser_error (parser, "expected class-name");
      return error_mark_node;
    }

  /* PARSER->SCOPE can be cleared when parsing the template-arguments
     to a template-id, so we save it here.  */
  scope = parser->scope;
  if (scope == error_mark_node)
    return error_mark_node;

  /* Any name names a type if we're following the `typename' keyword
     in a qualified name where the enclosing scope is type-dependent.  */
  typename_p = (typename_keyword_p && scope && TYPE_P (scope)
		&& dependent_type_p (scope));
  /* Handle the common case (an identifier, but not a template-id)
     efficiently.  */
  if (token->type == CPP_NAME
      && !cp_parser_nth_token_starts_template_argument_list_p (parser, 2))
    {
      cp_token *identifier_token;
      bool ambiguous_p;

      /* Look for the identifier.  */
      identifier_token = cp_lexer_peek_token (parser->lexer);
      ambiguous_p = identifier_token->ambiguous_p;
      identifier = cp_parser_identifier (parser);
      /* If the next token isn't an identifier, we are certainly not
	 looking at a class-name.  */
      if (identifier == error_mark_node)
	decl = error_mark_node;
      /* If we know this is a type-name, there's no need to look it
	 up.  */
      else if (typename_p)
	decl = identifier;
      else
	{
	  tree ambiguous_decls;
	  /* If we already know that this lookup is ambiguous, then
	     we've already issued an error message; there's no reason
	     to check again.  */
	  if (ambiguous_p)
	    {
	      cp_parser_simulate_error (parser);
	      return error_mark_node;
	    }
	  /* If the next token is a `::', then the name must be a type
	     name.

	     [basic.lookup.qual]

	     During the lookup for a name preceding the :: scope
	     resolution operator, object, function, and enumerator
	     names are ignored.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	    tag_type = typename_type;
	  /* Look up the name.  */
	  decl = cp_parser_lookup_name (parser, identifier,
					tag_type,
					/*is_template=*/false,
					/*is_namespace=*/false,
					check_dependency_p,
					&ambiguous_decls,
					identifier_token->location);
	  if (ambiguous_decls)
	    {
	      if (cp_parser_parsing_tentatively (parser))
		cp_parser_simulate_error (parser);
	      return error_mark_node;
	    }
	}
    }
  else
    {
      /* Try a template-id.  */
      decl = cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    is_declaration);
      if (decl == error_mark_node)
	return error_mark_node;
    }

  decl = cp_parser_maybe_treat_template_as_class (decl, class_head_p);

  /* If this is a typename, create a TYPENAME_TYPE.  */
  if (typename_p && decl != error_mark_node)
    {
      decl = make_typename_type (scope, decl, typename_type,
				 /*complain=*/tf_error);
      if (decl != error_mark_node)
	decl = TYPE_NAME (decl);
    }

  /* Look through any USING_DECL so the checks below see the real
     declaration.  */
  decl = strip_using_decl (decl);

  /* Check to see that it is really the name of a class.  */
  if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
      && TREE_CODE (TREE_OPERAND (decl, 0)) == IDENTIFIER_NODE
      && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    /* Situations like this:

	 template <typename T> struct A {
	   typename T::template X<int>::I i;
	 };

       are problematic.  Is `T::template X<int>' a class-name?  The
       standard does not seem to be definitive, but there is no other
       valid interpretation of the following `::'.  Therefore, those
       names are considered class-names.  */
    {
      decl = make_typename_type (scope, decl, tag_type, tf_error);
      if (decl != error_mark_node)
	decl = TYPE_NAME (decl);
    }
  else if (TREE_CODE (decl) != TYPE_DECL
	   || TREE_TYPE (decl) == error_mark_node
	   || !MAYBE_CLASS_TYPE_P (TREE_TYPE (decl))
	   /* In Objective-C 2.0, a classname followed by '.' starts a
	      dot-syntax expression, and it's not a type-name.  */
	   || (c_dialect_objc ()
	       && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT
	       && objc_is_class_name (decl)))
    decl = error_mark_node;

  if (decl == error_mark_node)
    cp_parser_error (parser, "expected class-name");
  else if (identifier && !parser->scope)
    /* Record the use of the unqualified name so that later
       redefinitions inside the class can be diagnosed.  */
    maybe_note_name_used_in_class (identifier, decl);

  return decl;
}
/* Parse a class-specifier.

   class-specifier:
     class-head { member-specification [opt] }

   Worker for cp_parser_class_specifier; the wrapper only adds
   timevar accounting.  Returns the TREE_TYPE representing the class,
   or error_mark_node if the class-head or the `{' was malformed.  */

static tree
cp_parser_class_specifier_1 (cp_parser* parser)
{
  tree type;
  tree attributes = NULL_TREE;
  bool nested_name_specifier_p;
  unsigned saved_num_template_parameter_lists;
  bool saved_in_function_body;
  unsigned char in_statement;
  bool in_switch_statement_p;
  bool saved_in_unbraced_linkage_specification_p;
  tree old_scope = NULL_TREE;
  tree scope = NULL_TREE;
  cp_token *closing_brace;

  /* Access checks are deferred while the class body is parsed; they
     are reinstated by the matching pop at the end of this function.  */
  push_deferring_access_checks (dk_no_deferred);

  /* Parse the class-head.  */
  type = cp_parser_class_head (parser,
			       &nested_name_specifier_p);
  /* If the class-head was a semantic disaster, skip the entire body
     of the class.  */
  if (!type)
    {
      cp_parser_skip_to_end_of_block_or_statement (parser);
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* Look for the `{'.  */
  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    {
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* Issue an error message if type-definitions are forbidden here.  */
  cp_parser_check_type_definition (parser);

  /* Remember that we are defining one more class.  */
  ++parser->num_classes_being_defined;
  /* Inside the class, surrounding template-parameter-lists do not
     apply.  */
  saved_num_template_parameter_lists
    = parser->num_template_parameter_lists;
  parser->num_template_parameter_lists = 0;
  /* We are not in a function body.  */
  saved_in_function_body = parser->in_function_body;
  parser->in_function_body = false;
  /* Or in a loop.  */
  in_statement = parser->in_statement;
  parser->in_statement = 0;
  /* Or in a switch.  */
  in_switch_statement_p = parser->in_switch_statement_p;
  parser->in_switch_statement_p = false;
  /* We are not immediately inside an extern "lang" block.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;

  /* Start the class.  If the class-head used a nested-name-specifier,
     first enter the enclosing scope so members are declared in the
     right context.  */
  if (nested_name_specifier_p)
    {
      scope = CP_DECL_CONTEXT (TYPE_MAIN_DECL (type));
      old_scope = push_inner_scope (scope);
    }
  type = begin_class_definition (type);

  if (type == error_mark_node)
    /* If the type is erroneous, skip the entire body of the class.  */
    cp_parser_skip_to_closing_brace (parser);
  else
    /* Parse the member-specification.  */
    cp_parser_member_specification_opt (parser);

  /* Look for the trailing `}'.  */
  closing_brace = cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
  /* Look for trailing attributes to apply to this class.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    attributes = cp_parser_attributes_opt (parser);
  if (type != error_mark_node)
    type = finish_struct (type, attributes);
  if (nested_name_specifier_p)
    pop_inner_scope (old_scope, scope);

  /* We've finished a type definition.  Check for the common syntax
     error of forgetting a semicolon after the definition.  We need to
     be careful, as we can't just check for not-a-semicolon and be done
     with it; the user might have typed:

       class X { } c = ...;
       class X { } *p = ...;

     and so forth.  Instead, enumerate all the possible tokens that
     might follow this production; if we don't see one of them, then
     complain and silently insert the semicolon.  */
  {
    cp_token *token = cp_lexer_peek_token (parser->lexer);
    bool want_semicolon = true;

    switch (token->type)
      {
      case CPP_NAME:
      case CPP_SEMICOLON:
      case CPP_MULT:
      case CPP_AND:
      case CPP_OPEN_PAREN:
      case CPP_CLOSE_PAREN:
      case CPP_COMMA:
	want_semicolon = false;
	break;

	/* While it's legal for type qualifiers and storage class
	   specifiers to follow type definitions in the grammar, only
	   compiler testsuites contain code like that.  Assume that if
	   we see such code, then what we're really seeing is a case
	   like:

	     class X { }
	     const <type> var = ...;

	   or

	     class Y { }
	     static <type> func (...) ...

	   i.e. the qualifier or specifier applies to the next
	   declaration.  To do so, however, we need to look ahead one
	   more token to see if *that* token is a type specifier.

	   This code could be improved to handle:

	     class Z { }
	     static const <type> var = ...;  */
      case CPP_KEYWORD:
	if (keyword_is_decl_specifier (token->keyword))
	  {
	    cp_token *lookahead = cp_lexer_peek_nth_token (parser->lexer, 2);

	    /* Handling user-defined types here would be nice, but very
	       tricky.  */
	    want_semicolon
	      = (lookahead->type == CPP_KEYWORD
		 && keyword_begins_type_specifier (lookahead->keyword));
	  }
	break;
      default:
	break;
      }

    /* If we don't have a type, then something is very wrong and we
       shouldn't try to do anything clever.  Likewise for not seeing the
       closing brace.  */
    if (closing_brace && TYPE_P (type) && want_semicolon)
      {
	cp_token_position prev
	  = cp_lexer_previous_token_position (parser->lexer);
	cp_token *prev_token = cp_lexer_token_at (parser->lexer, prev);
	location_t loc = prev_token->location;

	/* Tailor the wording of the diagnostic to the class-key.  */
	if (CLASSTYPE_DECLARED_CLASS (type))
	  error_at (loc, "expected %<;%> after class definition");
	else if (TREE_CODE (type) == RECORD_TYPE)
	  error_at (loc, "expected %<;%> after struct definition");
	else if (TREE_CODE (type) == UNION_TYPE)
	  error_at (loc, "expected %<;%> after union definition");
	else
	  gcc_unreachable ();

	/* Unget one token and smash it to look as though we encountered
	   a semicolon in the input stream.  */
	cp_lexer_set_token_position (parser->lexer, prev);
	token = cp_lexer_peek_token (parser->lexer);
	token->type = CPP_SEMICOLON;
	token->keyword = RID_MAX;
      }
  }

  /* If this class is not itself within the scope of another class,
     then we need to parse the bodies of all of the queued function
     definitions.  Note that the queued functions defined in a class
     are not always processed immediately following the
     class-specifier for that class.  Consider:

       struct A {
	 struct B { void f() { sizeof (A); } };
       };

     If `f' were processed before the processing of `A' were
     completed, there would be no way to compute the size of `A'.
     Note that the nesting we are interested in here is lexical --
     not the semantic nesting given by TYPE_CONTEXT.  In particular,
     for:

       struct A { struct B; };
       struct A::B { void f() { } };

     there is no need to delay the parsing of `A::B::f'.  */
  if (--parser->num_classes_being_defined == 0)
    {
      tree decl;
      tree class_type = NULL_TREE;
      tree pushed_scope = NULL_TREE;
      unsigned ix;
      cp_default_arg_entry *e;
      tree save_ccp, save_ccr;

      /* In a first pass, parse default arguments to the functions.
	 Then, in a second pass, parse the bodies of the functions.
	 This two-phased approach handles cases like:

	    struct S {
	      void f() { g(); }
	      void g(int i = 3);
	    };

	 */
      FOR_EACH_VEC_ELT (cp_default_arg_entry, unparsed_funs_with_default_args,
			ix, e)
	{
	  decl = e->decl;
	  /* If there are default arguments that have not yet been processed,
	     take care of them now.  The enclosing class scope is pushed only
	     when it changes between consecutive queue entries.  */
	  if (class_type != e->class_type)
	    {
	      if (pushed_scope)
		pop_scope (pushed_scope);
	      class_type = e->class_type;
	      pushed_scope = push_scope (class_type);
	    }
	  /* Make sure that any template parameters are in scope.  */
	  maybe_begin_member_template_processing (decl);
	  /* Parse the default argument expressions.  */
	  cp_parser_late_parsing_default_args (parser, decl);
	  /* Remove any template parameters from the symbol table.  */
	  maybe_end_member_template_processing ();
	}
      VEC_truncate (cp_default_arg_entry, unparsed_funs_with_default_args, 0);
      /* Now parse any NSDMIs (non-static data member initializers);
	 `this' must be visible while their initializers are parsed.  */
      save_ccp = current_class_ptr;
      save_ccr = current_class_ref;
      FOR_EACH_VEC_ELT (tree, unparsed_nsdmis, ix, decl)
	{
	  if (class_type != DECL_CONTEXT (decl))
	    {
	      if (pushed_scope)
		pop_scope (pushed_scope);
	      class_type = DECL_CONTEXT (decl);
	      pushed_scope = push_scope (class_type);
	    }
	  inject_this_parameter (class_type, TYPE_UNQUALIFIED);
	  cp_parser_late_parsing_nsdmi (parser, decl);
	}
      VEC_truncate (tree, unparsed_nsdmis, 0);
      current_class_ptr = save_ccp;
      current_class_ref = save_ccr;
      if (pushed_scope)
	pop_scope (pushed_scope);
      /* Now parse the body of the functions.  */
      FOR_EACH_VEC_ELT (tree, unparsed_funs_with_definitions, ix, decl)
	cp_parser_late_parsing_for_member (parser, decl);
      VEC_truncate (tree, unparsed_funs_with_definitions, 0);
    }

  /* Put back any saved access checks.  */
  pop_deferring_access_checks ();

  /* Restore saved state.  */
  parser->in_switch_statement_p = in_switch_statement_p;
  parser->in_statement = in_statement;
  parser->in_function_body = saved_in_function_body;
  parser->num_template_parameter_lists
    = saved_num_template_parameter_lists;
  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;

  return type;
}
/* Parse a class-specifier, charging the time spent to the
   TV_PARSE_STRUCT timevar.  All of the real work is done by
   cp_parser_class_specifier_1.  */

static tree
cp_parser_class_specifier (cp_parser* parser)
{
  tree result;

  timevar_push (TV_PARSE_STRUCT);
  result = cp_parser_class_specifier_1 (parser);
  timevar_pop (TV_PARSE_STRUCT);

  return result;
}
/* Parse a class-head.

   class-head:
     class-key identifier [opt] base-clause [opt]
     class-key nested-name-specifier identifier class-virt-specifier [opt] base-clause [opt]
     class-key nested-name-specifier [opt] template-id
       base-clause [opt]

   class-virt-specifier:
     final

   GNU Extensions:
     class-key attributes identifier [opt] base-clause [opt]
     class-key attributes nested-name-specifier identifier base-clause [opt]
     class-key attributes nested-name-specifier [opt] template-id
       base-clause [opt]

   Any base-clause is parsed here and processed via xref_basetypes;
   it is not returned to the caller.

   Returns the TYPE of the indicated class.  Sets
   *NESTED_NAME_SPECIFIER_P to TRUE iff one of the productions
   involving a nested-name-specifier was used, and FALSE otherwise.

   Returns error_mark_node if this is not a class-head.

   Returns NULL_TREE if the class-head is syntactically valid, but
   semantically invalid in a way that means we should skip the entire
   body of the class.  */

static tree
cp_parser_class_head (cp_parser* parser,
		      bool* nested_name_specifier_p)
{
  tree nested_name_specifier;
  enum tag_types class_key;
  tree id = NULL_TREE;
  tree type = NULL_TREE;
  tree attributes;
  tree bases;
  cp_virt_specifiers virt_specifiers = VIRT_SPEC_UNSPECIFIED;
  bool template_id_p = false;
  bool qualified_p = false;
  bool invalid_nested_name_p = false;
  bool invalid_explicit_specialization_p = false;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
  tree pushed_scope = NULL_TREE;
  unsigned num_templates;
  cp_token *type_start_token = NULL, *nested_name_specifier_token_start = NULL;
  /* Assume no nested-name-specifier will be present.  */
  *nested_name_specifier_p = false;
  /* Assume no template parameter lists will be used in defining the
     type.  */
  num_templates = 0;
  parser->colon_corrects_to_scope_p = false;

  /* Look for the class-key.  */
  class_key = cp_parser_class_key (parser);
  if (class_key == none_type)
    /* NOTE(review): this early return does not restore
       colon_corrects_to_scope_p (the `out:' label below does) --
       confirm whether that is intentional.  */
    return error_mark_node;

  /* Parse the attributes.  */
  attributes = cp_parser_attributes_opt (parser);

  /* If the next token is `::', that is invalid -- but sometimes
     people do try to write:

       struct ::S {};

     Handle this gracefully by accepting the extra qualifier, and then
     issuing an error about it later if this really is a
     class-head.  If it turns out just to be an elaborated type
     specifier, remain silent.  */
  if (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false))
    qualified_p = true;

  push_deferring_access_checks (dk_no_check);

  /* Determine the name of the class.  Begin by looking for an
     optional nested-name-specifier.  */
  nested_name_specifier_token_start = cp_lexer_peek_token (parser->lexer);
  nested_name_specifier
    = cp_parser_nested_name_specifier_opt (parser,
					   /*typename_keyword_p=*/false,
					   /*check_dependency_p=*/false,
					   /*type_p=*/false,
					   /*is_declaration=*/false);
  /* If there was a nested-name-specifier, then there *must* be an
     identifier.  */
  if (nested_name_specifier)
    {
      type_start_token = cp_lexer_peek_token (parser->lexer);
      /* Although the grammar says `identifier', it really means
	 `class-name' or `template-name'.  You are only allowed to
	 define a class that has already been declared with this
	 syntax.

	 The proposed resolution for Core Issue 180 says that wherever
	 you see `class T::X' you should treat `X' as a type-name.

	 It is OK to define an inaccessible class; for example:

	   class A { class B; };
	   class A::B {};

	 We do not know if we will see a class-name, or a
	 template-name.  We look for a class-name first, in case the
	 class-name is a template-id; if we looked for the
	 template-name first we would stop after the template-name.  */
      cp_parser_parse_tentatively (parser);
      type = cp_parser_class_name (parser,
				   /*typename_keyword_p=*/false,
				   /*template_keyword_p=*/false,
				   class_type,
				   /*check_dependency_p=*/false,
				   /*class_head_p=*/true,
				   /*is_declaration=*/false);
      /* If that didn't work, ignore the nested-name-specifier.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  invalid_nested_name_p = true;
	  type_start_token = cp_lexer_peek_token (parser->lexer);
	  id = cp_parser_identifier (parser);
	  if (id == error_mark_node)
	    id = NULL_TREE;
	}
      /* If we could not find a corresponding TYPE, treat this
	 declaration like an unqualified declaration.  */
      if (type == error_mark_node)
	nested_name_specifier = NULL_TREE;
      /* Otherwise, count the number of templates used in TYPE and its
	 containing scopes.  */
      else
	{
	  tree scope;

	  /* Walk outward from TYPE to the enclosing namespace,
	     counting each enclosing primary class template -- each
	     one requires its own template-parameter-list on this
	     definition.  */
	  for (scope = TREE_TYPE (type);
	       scope && TREE_CODE (scope) != NAMESPACE_DECL;
	       scope = (TYPE_P (scope)
			? TYPE_CONTEXT (scope)
			: DECL_CONTEXT (scope)))
	    if (TYPE_P (scope)
		&& CLASS_TYPE_P (scope)
		&& CLASSTYPE_TEMPLATE_INFO (scope)
		&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope))
		&& !CLASSTYPE_TEMPLATE_SPECIALIZATION (scope))
	      ++num_templates;
	}
    }
  /* Otherwise, the identifier is optional.  */
  else
    {
      /* We don't know whether what comes next is a template-id,
	 an identifier, or nothing at all.  */
      cp_parser_parse_tentatively (parser);
      /* Check for a template-id.  */
      type_start_token = cp_lexer_peek_token (parser->lexer);
      id = cp_parser_template_id (parser,
				  /*template_keyword_p=*/false,
				  /*check_dependency_p=*/true,
				  /*is_declaration=*/true);
      /* If that didn't work, it could still be an identifier.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	    {
	      type_start_token = cp_lexer_peek_token (parser->lexer);
	      id = cp_parser_identifier (parser);
	    }
	  else
	    id = NULL_TREE;
	}
      else
	{
	  template_id_p = true;
	  ++num_templates;
	}
    }

  pop_deferring_access_checks ();

  if (id)
    {
      cp_parser_check_for_invalid_template_id (parser, id,
					       type_start_token->location);
    }
  virt_specifiers = cp_parser_virt_specifier_seq_opt (parser);

  /* If it's not a `:' or a `{' then we can't really be looking at a
     class-head, since a class-head only appears as part of a
     class-specifier.  We have to detect this situation before calling
     xref_tag, since that has irreversible side-effects.  */
  if (!cp_parser_next_token_starts_class_definition_p (parser))
    {
      cp_parser_error (parser, "expected %<{%> or %<:%>");
      type = error_mark_node;
      goto out;
    }

  /* At this point, we're going ahead with the class-specifier, even
     if some other problem occurs.  */
  cp_parser_commit_to_tentative_parse (parser);
  if (virt_specifiers & VIRT_SPEC_OVERRIDE)
    {
      cp_parser_error (parser,
		       "cannot specify %<override%> for a class");
      type = error_mark_node;
      goto out;
    }
  /* Issue the error about the overly-qualified name now.  */
  if (qualified_p)
    {
      cp_parser_error (parser,
		       "global qualification of class name is invalid");
      type = error_mark_node;
      goto out;
    }
  else if (invalid_nested_name_p)
    {
      cp_parser_error (parser,
		       "qualified name does not name a class");
      type = error_mark_node;
      goto out;
    }
  else if (nested_name_specifier)
    {
      tree scope;

      /* Reject typedef-names in class heads.  */
      if (!DECL_IMPLICIT_TYPEDEF_P (type))
	{
	  error_at (type_start_token->location,
		    "invalid class name in declaration of %qD",
		    type);
	  type = NULL_TREE;
	  goto done;
	}

      /* Figure out in what scope the declaration is being placed.  */
      scope = current_scope ();
      /* If that scope does not contain the scope in which the
	 class was originally declared, the program is invalid.  */
      if (scope && !is_ancestor (scope, nested_name_specifier))
	{
	  if (at_namespace_scope_p ())
	    error_at (type_start_token->location,
		      "declaration of %qD in namespace %qD which does not "
		      "enclose %qD",
		      type, scope, nested_name_specifier);
	  else
	    error_at (type_start_token->location,
		      "declaration of %qD in %qD which does not enclose %qD",
		      type, scope, nested_name_specifier);
	  type = NULL_TREE;
	  goto done;
	}
      /* [dcl.meaning]

	 A declarator-id shall not be qualified except for the
	 definition of a ... nested class outside of its class
	 ... [or] the definition or explicit instantiation of a
	 class member of a namespace outside of its namespace.  */
      if (scope == nested_name_specifier)
	{
	  permerror (nested_name_specifier_token_start->location,
		     "extra qualification not allowed");
	  nested_name_specifier = NULL_TREE;
	  num_templates = 0;
	}
    }
  /* An explicit-specialization must be preceded by "template <>".  If
     it is not, try to recover gracefully.  */
  if (at_namespace_scope_p ()
      && parser->num_template_parameter_lists == 0
      && template_id_p)
    {
      error_at (type_start_token->location,
		"an explicit specialization must be preceded by %<template <>%>");
      invalid_explicit_specialization_p = true;
      /* Take the same action that would have been taken by
	 cp_parser_explicit_specialization.  */
      ++parser->num_template_parameter_lists;
      begin_specialization ();
    }

  /* There must be no "return" statements between this point and the
     end of this function; set "type" to the correct return value and
     use "goto done;" to return.  */

  /* Make sure that the right number of template parameters were
     present.  */
  if (!cp_parser_check_template_parameters (parser, num_templates,
					    type_start_token->location,
					    /*declarator=*/NULL))
    {
      /* If something went wrong, there is no point in even trying to
	 process the class-definition.  */
      type = NULL_TREE;
      goto done;
    }

  /* Look up the type.  */
  if (template_id_p)
    {
      if (TREE_CODE (id) == TEMPLATE_ID_EXPR
	  && (DECL_FUNCTION_TEMPLATE_P (TREE_OPERAND (id, 0))
	      || TREE_CODE (TREE_OPERAND (id, 0)) == OVERLOAD))
	{
	  error_at (type_start_token->location,
		    "function template %qD redeclared as a class template", id);
	  type = error_mark_node;
	}
      else
	{
	  type = TREE_TYPE (id);
	  type = maybe_process_partial_specialization (type);
	}
      if (nested_name_specifier)
	pushed_scope = push_scope (nested_name_specifier);
    }
  else if (nested_name_specifier)
    {
      tree class_type;

      /* Given:

	    template <typename T> struct S { struct T };
	    template <typename T> struct S<T>::T { };

	 we will get a TYPENAME_TYPE when processing the definition of
	 `S::T'.  We need to resolve it to the actual type before we
	 try to define it.  */
      if (TREE_CODE (TREE_TYPE (type)) == TYPENAME_TYPE)
	{
	  class_type = resolve_typename_type (TREE_TYPE (type),
					      /*only_current_p=*/false);
	  if (TREE_CODE (class_type) != TYPENAME_TYPE)
	    type = TYPE_NAME (class_type);
	  else
	    {
	      cp_parser_error (parser, "could not resolve typename type");
	      type = error_mark_node;
	    }
	}

      if (maybe_process_partial_specialization (TREE_TYPE (type))
	  == error_mark_node)
	{
	  type = NULL_TREE;
	  goto done;
	}

      class_type = current_class_type;
      /* Enter the scope indicated by the nested-name-specifier.  */
      pushed_scope = push_scope (nested_name_specifier);
      /* Get the canonical version of this type.  */
      type = TYPE_MAIN_DECL (TREE_TYPE (type));
      if (PROCESSING_REAL_TEMPLATE_DECL_P ()
	  && !CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (type)))
	{
	  type = push_template_decl (type);
	  if (type == error_mark_node)
	    {
	      type = NULL_TREE;
	      goto done;
	    }
	}

      type = TREE_TYPE (type);
      *nested_name_specifier_p = true;
    }
  else      /* The name is not a nested name.  */
    {
      /* If the class was unnamed, create a dummy name.  */
      if (!id)
	id = make_anon_name ();
      type = xref_tag (class_key, id, /*tag_scope=*/ts_current,
		       parser->num_template_parameter_lists);
    }

  /* Indicate whether this class was declared as a `class' or as a
     `struct'.  */
  if (TREE_CODE (type) == RECORD_TYPE)
    CLASSTYPE_DECLARED_CLASS (type) = (class_key == class_type);
  cp_parser_check_class_key (class_key, type);

  /* If this type was already complete, and we see another definition,
     that's an error.  */
  if (type != error_mark_node && COMPLETE_TYPE_P (type))
    {
      error_at (type_start_token->location, "redefinition of %q#T",
		type);
      error_at (type_start_token->location, "previous definition of %q+#T",
		type);
      type = NULL_TREE;
      goto done;
    }
  else if (type == error_mark_node)
    type = NULL_TREE;

  if (type)
    {
      /* Apply attributes now, before any use of the class as a template
	 argument in its base list.  */
      cplus_decl_attributes (&type, attributes, (int)ATTR_FLAG_TYPE_IN_PLACE);
      fixup_attribute_variants (type);
    }

  /* We will have entered the scope containing the class; the names of
     base classes should be looked up in that context.  For example:

       struct A { struct B {}; struct C; };
       struct A::C : B {};

     is valid.  */

  /* Get the list of base-classes, if there is one.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    bases = cp_parser_base_clause (parser);
  else
    bases = NULL_TREE;

  /* If we're really defining a class, process the base classes.
     If they're invalid, fail.  */
  if (type && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)
      && !xref_basetypes (type, bases))
    type = NULL_TREE;

 done:
  /* Leave the scope given by the nested-name-specifier.  We will
     enter the class scope itself while processing the members.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  if (invalid_explicit_specialization_p)
    {
      end_specialization ();
      --parser->num_template_parameter_lists;
    }

  if (type)
    DECL_SOURCE_LOCATION (TYPE_NAME (type)) = type_start_token->location;
  if (type && (virt_specifiers & VIRT_SPEC_FINAL))
    CLASSTYPE_FINAL (type) = 1;
 out:
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
  return type;
}
/* Parse a class-key.

   class-key:
     class
     struct
     union

   Returns the tag_types value corresponding to the class-key that was
   consumed, or none_type (with an error already reported) if the next
   token is not a class-key.  */

static enum tag_types
cp_parser_class_key (cp_parser* parser)
{
  cp_token *key_token;
  enum tag_types kind;

  /* A class-key is necessarily a keyword token.  */
  key_token = cp_parser_require (parser, CPP_KEYWORD, RT_CLASS_KEY);
  if (key_token == NULL)
    return none_type;

  /* Map the keyword onto the corresponding tag type; diagnose
     keywords that are not class-keys.  */
  kind = cp_parser_token_is_class_key (key_token);
  if (!kind)
    cp_parser_error (parser, "expected class-key");

  return kind;
}
/* Parse an (optional) member-specification.

   member-specification:
     member-declaration member-specification [opt]
     access-specifier : member-specification [opt]

   Consumes members until the closing `}' of the class body (or EOF)
   is reached; that terminating token is left for the caller.  */

static void
cp_parser_member_specification_opt (cp_parser* parser)
{
  while (true)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);

      /* A `}' (or end of input) terminates the member-specification;
	 do not consume it here.  */
      if (tok->type == CPP_CLOSE_BRACE
	  || tok->type == CPP_EOF
	  || tok->type == CPP_PRAGMA_EOL)
	return;

      if (tok->keyword == RID_PUBLIC
	  || tok->keyword == RID_PROTECTED
	  || tok->keyword == RID_PRIVATE)
	{
	  /* An access-specifier: consume it, make it the active
	     specifier, and require the following `:'.  */
	  cp_lexer_consume_token (parser->lexer);
	  current_access_specifier = tok->u.value;
	  cp_parser_require (parser, CPP_COLON, RT_COLON);
	}
      else if (tok->type == CPP_PRAGMA)
	/* Accept #pragmas at class scope.  */
	cp_parser_pragma (parser, pragma_external);
      else
	/* Otherwise, the next construction must be a
	   member-declaration.  */
	cp_parser_member_declaration (parser);
    }
}
/* Parse a member-declaration.
member-declaration:
decl-specifier-seq [opt] member-declarator-list [opt] ;
function-definition ; [opt]
:: [opt] nested-name-specifier template [opt] unqualified-id ;
using-declaration
template-declaration
alias-declaration
member-declarator-list:
member-declarator
member-declarator-list , member-declarator
member-declarator:
declarator pure-specifier [opt]
declarator constant-initializer [opt]
identifier [opt] : constant-expression
GNU Extensions:
member-declaration:
__extension__ member-declaration
member-declarator:
declarator attributes [opt] pure-specifier [opt]
declarator attributes [opt] constant-initializer [opt]
identifier [opt] attributes [opt] : constant-expression
C++0x Extensions:
member-declaration:
static_assert-declaration */
static void
cp_parser_member_declaration (cp_parser* parser)
{
cp_decl_specifier_seq decl_specifiers;
tree prefix_attributes;
tree decl;
int declares_class_or_enum;
bool friend_p;
cp_token *token = NULL;
cp_token *decl_spec_token_start = NULL;
cp_token *initializer_token_start = NULL;
int saved_pedantic;
bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
/* Check for the `__extension__' keyword. */
if (cp_parser_extension_opt (parser, &saved_pedantic))
{
/* Recurse. */
cp_parser_member_declaration (parser);
/* Restore the old value of the PEDANTIC flag. */
pedantic = saved_pedantic;
return;
}
/* Check for a template-declaration. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
{
/* An explicit specialization here is an error condition, and we
expect the specialization handler to detect and report this. */
if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
&& cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
cp_parser_explicit_specialization (parser);
else
cp_parser_template_declaration (parser, /*member_p=*/true);
return;
}
/* Check for a using-declaration. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_USING))
{
if (cxx_dialect < cxx0x)
{
/* Parse the using-declaration. */
cp_parser_using_declaration (parser,
/*access_declaration_p=*/false);
return;
}
else
{
tree decl;
cp_parser_parse_tentatively (parser);
decl = cp_parser_alias_declaration (parser);
if (cp_parser_parse_definitely (parser))
finish_member_declaration (decl);
else
cp_parser_using_declaration (parser,
/*access_declaration_p=*/false);
return;
}
}
/* Check for @defs. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_DEFS))
{
tree ivar, member;
tree ivar_chains = cp_parser_objc_defs_expression (parser);
ivar = ivar_chains;
while (ivar)
{
member = ivar;
ivar = TREE_CHAIN (member);
TREE_CHAIN (member) = NULL_TREE;
finish_member_declaration (member);
}
return;
}
/* If the next token is `static_assert' we have a static assertion. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC_ASSERT))
{
cp_parser_static_assert (parser, /*member_p=*/true);
return;
}
parser->colon_corrects_to_scope_p = false;
if (cp_parser_using_declaration (parser, /*access_declaration=*/true))
goto out;
/* Parse the decl-specifier-seq. */
decl_spec_token_start = cp_lexer_peek_token (parser->lexer);
cp_parser_decl_specifier_seq (parser,
CP_PARSER_FLAGS_OPTIONAL,
&decl_specifiers,
&declares_class_or_enum);
prefix_attributes = decl_specifiers.attributes;
decl_specifiers.attributes = NULL_TREE;
/* Check for an invalid type-name. */
if (!decl_specifiers.any_type_specifiers_p
&& cp_parser_parse_and_diagnose_invalid_type_name (parser))
goto out;
/* If there is no declarator, then the decl-specifier-seq should
specify a type. */
if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
{
/* If there was no decl-specifier-seq, and the next token is a
`;', then we have something like:
struct S { ; };
[class.mem]
Each member-declaration shall declare at least one member
name of the class. */
if (!decl_specifiers.any_specifiers_p)
{
cp_token *token = cp_lexer_peek_token (parser->lexer);
if (!in_system_header_at (token->location))
pedwarn (token->location, OPT_pedantic, "extra %<;%>");
}
else
{
tree type;
/* See if this declaration is a friend. */
friend_p = cp_parser_friend_p (&decl_specifiers);
/* If there were decl-specifiers, check to see if there was
a class-declaration. */
type = check_tag_decl (&decl_specifiers);
/* Nested classes have already been added to the class, but
a `friend' needs to be explicitly registered. */
if (friend_p)
{
/* If the `friend' keyword was present, the friend must
be introduced with a class-key. */
if (!declares_class_or_enum && cxx_dialect < cxx0x)
pedwarn (decl_spec_token_start->location, OPT_pedantic,
"in C++03 a class-key must be used "
"when declaring a friend");
/* In this case:
template <typename T> struct A {
friend struct A<T>::B;
};
A<T>::B will be represented by a TYPENAME_TYPE, and
therefore not recognized by check_tag_decl. */
if (!type)
{
type = decl_specifiers.type;
if (type && TREE_CODE (type) == TYPE_DECL)
type = TREE_TYPE (type);
}
if (!type || !TYPE_P (type))
error_at (decl_spec_token_start->location,
"friend declaration does not name a class or "
"function");
else
make_friend_class (current_class_type, type,
/*complain=*/true);
}
/* If there is no TYPE, an error message will already have
been issued. */
else if (!type || type == error_mark_node)
;
/* An anonymous aggregate has to be handled specially; such
a declaration really declares a data member (with a
particular type), as opposed to a nested class. */
else if (ANON_AGGR_TYPE_P (type))
{
/* Remove constructors and such from TYPE, now that we
know it is an anonymous aggregate. */
fixup_anonymous_aggr (type);
/* And make the corresponding data member. */
decl = build_decl (decl_spec_token_start->location,
FIELD_DECL, NULL_TREE, type);
/* Add it to the class. */
finish_member_declaration (decl);
}
else
cp_parser_check_access_in_redeclaration
(TYPE_NAME (type),
decl_spec_token_start->location);
}
}
else
{
bool assume_semicolon = false;
/* See if these declarations will be friends. */
friend_p = cp_parser_friend_p (&decl_specifiers);
/* Keep going until we hit the `;' at the end of the
declaration. */
while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
{
tree attributes = NULL_TREE;
tree first_attribute;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* Check for a bitfield declaration. */
if (token->type == CPP_COLON
|| (token->type == CPP_NAME
&& cp_lexer_peek_nth_token (parser->lexer, 2)->type
== CPP_COLON))
{
tree identifier;
tree width;
/* Get the name of the bitfield. Note that we cannot just
check TOKEN here because it may have been invalidated by
the call to cp_lexer_peek_nth_token above. */
if (cp_lexer_peek_token (parser->lexer)->type != CPP_COLON)
identifier = cp_parser_identifier (parser);
else
identifier = NULL_TREE;
/* Consume the `:' token. */
cp_lexer_consume_token (parser->lexer);
/* Get the width of the bitfield. */
width
= cp_parser_constant_expression (parser,
/*allow_non_constant=*/false,
NULL);
/* Look for attributes that apply to the bitfield. */
attributes = cp_parser_attributes_opt (parser);
/* Remember which attributes are prefix attributes and
which are not. */
first_attribute = attributes;
/* Combine the attributes. */
attributes = chainon (prefix_attributes, attributes);
/* Create the bitfield declaration. */
decl = grokbitfield (identifier
? make_id_declarator (NULL_TREE,
identifier,
sfk_none)
: NULL,
&decl_specifiers,
width,
attributes);
}
else
{
cp_declarator *declarator;
tree initializer;
tree asm_specification;
int ctor_dtor_or_conv_p;
/* Parse the declarator. */
declarator
= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
&ctor_dtor_or_conv_p,
/*parenthesized_p=*/NULL,
/*member_p=*/true);
/* If something went wrong parsing the declarator, make sure
that we at least consume some tokens. */
if (declarator == cp_error_declarator)
{
/* Skip to the end of the statement. */
cp_parser_skip_to_end_of_statement (parser);
/* If the next token is not a semicolon, that is
probably because we just skipped over the body of
a function. So, we consume a semicolon if
present, but do not issue an error message if it
is not present. */
if (cp_lexer_next_token_is (parser->lexer,
CPP_SEMICOLON))
cp_lexer_consume_token (parser->lexer);
goto out;
}
if (declares_class_or_enum & 2)
cp_parser_check_for_definition_in_return_type
(declarator, decl_specifiers.type,
decl_specifiers.type_location);
/* Look for an asm-specification. */
asm_specification = cp_parser_asm_specification_opt (parser);
/* Look for attributes that apply to the declaration. */
attributes = cp_parser_attributes_opt (parser);
/* Remember which attributes are prefix attributes and
which are not. */
first_attribute = attributes;
/* Combine the attributes. */
attributes = chainon (prefix_attributes, attributes);
/* If it's an `=', then we have a constant-initializer or a
pure-specifier. It is not correct to parse the
initializer before registering the member declaration
since the member declaration should be in scope while
its initializer is processed. However, the rest of the
front end does not yet provide an interface that allows
us to handle this correctly. */
if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
{
/* In [class.mem]:
A pure-specifier shall be used only in the declaration of
a virtual function.
A member-declarator can contain a constant-initializer
only if it declares a static member of integral or
enumeration type.
Therefore, if the DECLARATOR is for a function, we look
for a pure-specifier; otherwise, we look for a
constant-initializer. When we call `grokfield', it will
perform more stringent semantics checks. */
initializer_token_start = cp_lexer_peek_token (parser->lexer);
if (function_declarator_p (declarator)
|| (decl_specifiers.type
&& TREE_CODE (decl_specifiers.type) == TYPE_DECL
&& (TREE_CODE (TREE_TYPE (decl_specifiers.type))
== FUNCTION_TYPE)))
initializer = cp_parser_pure_specifier (parser);
else if (decl_specifiers.storage_class != sc_static)
initializer = cp_parser_save_nsdmi (parser);
else if (cxx_dialect >= cxx0x)
{
bool nonconst;
/* Don't require a constant rvalue in C++11, since we
might want a reference constant. We'll enforce
constancy later. */
cp_lexer_consume_token (parser->lexer);
/* Parse the initializer. */
initializer = cp_parser_initializer_clause (parser,
&nonconst);
}
else
/* Parse the initializer. */
initializer = cp_parser_constant_initializer (parser);
}
else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)
&& !function_declarator_p (declarator))
{
bool x;
if (decl_specifiers.storage_class != sc_static)
initializer = cp_parser_save_nsdmi (parser);
else
initializer = cp_parser_initializer (parser, &x, &x);
}
/* Otherwise, there is no initializer. */
else
initializer = NULL_TREE;
/* See if we are probably looking at a function
definition. We are certainly not looking at a
member-declarator. Calling `grokfield' has
side-effects, so we must not do it unless we are sure
that we are looking at a member-declarator. */
if (cp_parser_token_starts_function_definition_p
(cp_lexer_peek_token (parser->lexer)))
{
/* The grammar does not allow a pure-specifier to be
used when a member function is defined. (It is
possible that this fact is an oversight in the
standard, since a pure function may be defined
outside of the class-specifier. */
if (initializer && initializer_token_start)
error_at (initializer_token_start->location,
"pure-specifier on function-definition");
decl = cp_parser_save_member_function_body (parser,
&decl_specifiers,
declarator,
attributes);
/* If the member was not a friend, declare it here. */
if (!friend_p)
finish_member_declaration (decl);
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* If the next token is a semicolon, consume it. */
if (token->type == CPP_SEMICOLON)
cp_lexer_consume_token (parser->lexer);
goto out;
}
else
if (declarator->kind == cdk_function)
declarator->id_loc = token->location;
/* Create the declaration. */
decl = grokfield (declarator, &decl_specifiers,
initializer, /*init_const_expr_p=*/true,
asm_specification,
attributes);
}
/* Reset PREFIX_ATTRIBUTES. */
while (attributes && TREE_CHAIN (attributes) != first_attribute)
attributes = TREE_CHAIN (attributes);
if (attributes)
TREE_CHAIN (attributes) = NULL_TREE;
/* If there is any qualification still in effect, clear it
now; we will be starting fresh with the next declarator. */
parser->scope = NULL_TREE;
parser->qualifying_scope = NULL_TREE;
parser->object_scope = NULL_TREE;
/* If it's a `,', then there are more declarators. */
if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
cp_lexer_consume_token (parser->lexer);
/* If the next token isn't a `;', then we have a parse error. */
else if (cp_lexer_next_token_is_not (parser->lexer,
CPP_SEMICOLON))
{
/* The next token might be a ways away from where the
actual semicolon is missing. Find the previous token
and use that for our error position. */
cp_token *token = cp_lexer_previous_token (parser->lexer);
error_at (token->location,
"expected %<;%> at end of member declaration");
/* Assume that the user meant to provide a semicolon. If
we were to cp_parser_skip_to_end_of_statement, we might
skip to a semicolon inside a member function definition
and issue nonsensical error messages. */
assume_semicolon = true;
}
if (decl)
{
/* Add DECL to the list of members. */
if (!friend_p)
finish_member_declaration (decl);
if (TREE_CODE (decl) == FUNCTION_DECL)
cp_parser_save_default_args (parser, decl);
else if (TREE_CODE (decl) == FIELD_DECL
&& !DECL_C_BIT_FIELD (decl)
&& DECL_INITIAL (decl))
/* Add DECL to the queue of NSDMI to be parsed later. */
VEC_safe_push (tree, gc, unparsed_nsdmis, decl);
}
if (assume_semicolon)
goto out;
}
}
cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
out:
parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
}
/* Parse a pure-specifier.

   pure-specifier:
     = 0

   Returns INTEGER_ZERO_NODE if a pure specifier is found.
   Otherwise, ERROR_MARK_NODE is returned.  As an extension, `= default'
   and `= delete' (C++0x defaulted/deleted functions) are also accepted
   here; in that case the `default'/`delete' identifier node is
   returned.  */

static tree
cp_parser_pure_specifier (cp_parser* parser)
{
  cp_token *token;

  /* Look for the `=' token.  */
  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
    return error_mark_node;
  /* Look for the `0' token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* Don't consume a fake end-of-file or end-of-pragma token; there is
     nothing sensible after the `='.  */
  if (token->type == CPP_EOF
      || token->type == CPP_PRAGMA_EOL)
    return error_mark_node;

  /* Consume the token before classifying it, so that on the error
     path below cp_parser_skip_to_end_of_statement starts after it.  */
  cp_lexer_consume_token (parser->lexer);

  /* Accept = default or = delete in c++0x mode.  */
  if (token->keyword == RID_DEFAULT
      || token->keyword == RID_DELETE)
    {
      /* Warn (in C++03 mode) that this is a C++0x feature.  */
      maybe_warn_cpp0x (CPP0X_DEFAULTED_DELETED);
      return token->u.value;
    }

  /* c_lex_with_flags marks a single digit '0' with PURE_ZERO, so that
     e.g. `= 00' or `= 0x0' is rejected even though it has value 0.  */
  if (token->type != CPP_NUMBER || !(token->flags & PURE_ZERO))
    {
      cp_parser_error (parser,
		       "invalid pure specifier (only %<= 0%> is allowed)");
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }

  /* A template cannot be declared virtual, so it cannot be pure
     either.  */
  if (PROCESSING_REAL_TEMPLATE_DECL_P ())
    {
      error_at (token->location, "templates may not be %<virtual%>");
      return error_mark_node;
    }

  return integer_zero_node;
}
/* Parse a constant-initializer.

   constant-initializer:
     = constant-expression

   Returns a representation of the constant-expression, or
   ERROR_MARK_NODE if the `=' is missing or a brace-enclosed
   initializer was (invalidly) used.  */

static tree
cp_parser_constant_initializer (cp_parser* parser)
{
  /* Look for the `=' token.  */
  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
    return error_mark_node;

  /* It is invalid to write:

       struct S { static const int i = { 7 }; };

     so diagnose a brace-enclosed initializer here, then skip past it
     to keep the parse synchronized.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      cp_parser_error (parser,
		       "a brace-enclosed initializer is not allowed here");
      /* Consume the opening brace.  */
      cp_lexer_consume_token (parser->lexer);
      /* Skip the initializer.  */
      cp_parser_skip_to_closing_brace (parser);
      /* Look for the trailing `}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

      return error_mark_node;
    }

  return cp_parser_constant_expression (parser,
					/*allow_non_constant=*/false,
					NULL);
}
/* Derived classes [gram.class.derived] */
/* Parse a base-clause.

   base-clause:
     : base-specifier-list

   base-specifier-list:
     base-specifier ... [opt]
     base-specifier-list , base-specifier ... [opt]

   Returns a TREE_LIST representing the base-classes, in the order in
   which they were declared.  The representation of each node is as
   described by cp_parser_base_specifier.

   In the case that no bases are specified, this function will return
   NULL_TREE, not ERROR_MARK_NODE.  */

static tree
cp_parser_base_clause (cp_parser* parser)
{
  tree base_list = NULL_TREE;

  /* The clause is introduced by a `:'.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);

  /* Walk the comma-separated list of base-specifiers.  */
  for (;;)
    {
      tree spec;
      bool is_pack_expansion = false;

      /* Parse one base-specifier.  */
      spec = cp_parser_base_specifier (parser);

      /* A trailing `...' makes this specifier a pack expansion.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  cp_lexer_consume_token (parser->lexer);
	  is_pack_expansion = true;
	}

      /* Chain each valid specifier onto the front of the list; the
	 whole list is reversed into declaration order on return.  */
      if (spec && spec != error_mark_node)
	{
	  if (is_pack_expansion)
	    /* Make this a pack expansion type.  */
	    TREE_VALUE (spec) = make_pack_expansion (TREE_VALUE (spec));

	  if (!check_for_bare_parameter_packs (TREE_VALUE (spec)))
	    {
	      TREE_CHAIN (spec) = base_list;
	      base_list = spec;
	    }
	}

      /* Anything other than a `,' terminates the list.  */
      if (!cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }

  /* PARSER->SCOPE may still be non-NULL at this point, if the last
     base class had a qualified name.  However, the next name that
     appears is certainly not qualified.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;

  return nreverse (base_list);
}
/* Parse a base-specifier.

   base-specifier:
     :: [opt] nested-name-specifier [opt] class-name
     virtual access-specifier [opt] :: [opt] nested-name-specifier
       [opt] class-name
     access-specifier virtual [opt] :: [opt] nested-name-specifier
       [opt] class-name

   Returns a TREE_LIST.  The TREE_PURPOSE will be one of
   ACCESS_{DEFAULT,PUBLIC,PROTECTED,PRIVATE}_[VIRTUAL]_NODE to
   indicate the specifiers provided.  The TREE_VALUE will be a TYPE
   (or the ERROR_MARK_NODE) indicating the type that was specified.  */

static tree
cp_parser_base_specifier (cp_parser* parser)
{
  cp_token *token;
  bool done = false;
  bool virtual_p = false;
  /* Each duplicated specifier is diagnosed only once, however many
     times it is repeated.  */
  bool duplicate_virtual_error_issued_p = false;
  bool duplicate_access_error_issued_p = false;
  bool class_scope_p, template_p;
  tree access = access_default_node;
  tree type;

  /* Process the optional `virtual' and `access-specifier', which may
     appear in either order.  */
  while (!done)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* Process `virtual'.  */
      switch (token->keyword)
	{
	case RID_VIRTUAL:
	  /* If `virtual' appears more than once, issue an error.  */
	  if (virtual_p && !duplicate_virtual_error_issued_p)
	    {
	      cp_parser_error (parser,
			       "%<virtual%> specified more than once in base-specifier");
	      duplicate_virtual_error_issued_p = true;
	    }

	  virtual_p = true;

	  /* Consume the `virtual' token.  */
	  cp_lexer_consume_token (parser->lexer);

	  break;

	case RID_PUBLIC:
	case RID_PROTECTED:
	case RID_PRIVATE:
	  /* If more than one access specifier appears, issue an
	     error.  */
	  if (access != access_default_node
	      && !duplicate_access_error_issued_p)
	    {
	      cp_parser_error (parser,
			       "more than one access specifier in base-specifier");
	      duplicate_access_error_issued_p = true;
	    }

	  access = ridpointers[(int) token->keyword];

	  /* Consume the access-specifier.  */
	  cp_lexer_consume_token (parser->lexer);

	  break;

	default:
	  done = true;
	  break;
	}
    }
  /* It is not uncommon to see programs mechanically, erroneously, use
     the 'typename' keyword to denote (dependent) qualified types
     as base classes.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      token = cp_lexer_peek_token (parser->lexer);
      if (!processing_template_decl)
	error_at (token->location,
		  "keyword %<typename%> not allowed outside of templates");
      else
	error_at (token->location,
		  "keyword %<typename%> not allowed in this context "
		  "(the base class is implicitly a type)");
      cp_lexer_consume_token (parser->lexer);
    }

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  The simplest way to
     implement:

       [temp.res]

       The keyword `typename' is not permitted in a base-specifier or
       mem-initializer; in these contexts a qualified name that
       depends on a template-parameter is implicitly assumed to be a
       type name.

     is to pretend that we have seen the `typename' keyword at this
     point.  */
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/true,
				       /*check_dependency_p=*/true,
				       typename_type,
				       /*is_declaration=*/true);
  /* If the base class is given by a qualified name, assume that names
     we see are type names or templates, as appropriate.  */
  class_scope_p = (parser->scope && TYPE_P (parser->scope));
  template_p = class_scope_p && cp_parser_optional_template_keyword (parser);

  if (!parser->scope
      && cp_lexer_next_token_is_decltype (parser->lexer))
    /* DR 950 allows decltype as a base-specifier.  */
    type = cp_parser_decltype (parser);
  else
    {
      /* Otherwise, look for the class-name.  */
      type = cp_parser_class_name (parser,
				   class_scope_p,
				   template_p,
				   typename_type,
				   /*check_dependency_p=*/true,
				   /*class_head_p=*/false,
				   /*is_declaration=*/true);
      type = TREE_TYPE (type);
    }

  if (type == error_mark_node)
    return error_mark_node;

  return finish_base_specifier (type, access, virtual_p);
}
/* Exception handling [gram.exception] */
/* Parse an (optional) noexcept-specification.

   noexcept-specification:
     noexcept ( constant-expression ) [opt]

   If no noexcept-specification is present, returns NULL_TREE.

   Otherwise, if REQUIRE_CONSTEXPR is false, then either parse and return any
   expression if parentheses follow noexcept, or return BOOLEAN_TRUE_NODE if
   there are no parentheses.  CONSUMED_EXPR will be set accordingly.
   (CONSUMED_EXPR may be NULL only when REQUIRE_CONSTEXPR is true; it is
   dereferenced on the !REQUIRE_CONSTEXPR paths below.)

   Otherwise, returns a noexcept specification unless RETURN_COND is true,
   in which case a boolean condition is returned instead.  */

static tree
cp_parser_noexcept_specification_opt (cp_parser* parser,
				      bool require_constexpr,
				      bool* consumed_expr,
				      bool return_cond)
{
  cp_token *token;
  const char *saved_message;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* Is it a noexcept-specification?  */
  if (cp_parser_is_keyword (token, RID_NOEXCEPT))
    {
      tree expr;
      /* Consume the `noexcept' keyword.  */
      cp_lexer_consume_token (parser->lexer);

      if (cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
	{
	  /* Consume the `('.  */
	  cp_lexer_consume_token (parser->lexer);

	  if (require_constexpr)
	    {
	      /* Types may not be defined in an exception-specification.  */
	      saved_message = parser->type_definition_forbidden_message;
	      parser->type_definition_forbidden_message
	      = G_("types may not be defined in an exception-specification");

	      expr = cp_parser_constant_expression (parser, false, NULL);

	      /* Restore the saved message.  */
	      parser->type_definition_forbidden_message = saved_message;
	    }
	  else
	    {
	      /* The caller wants the raw expression (e.g. for delayed
		 parsing); any expression is accepted here.  */
	      expr = cp_parser_expression (parser, false, NULL);
	      *consumed_expr = true;
	    }

	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	}
      else
	{
	  /* Bare `noexcept' is equivalent to `noexcept(true)'.  */
	  expr = boolean_true_node;
	  if (!require_constexpr)
	    *consumed_expr = false;
	}

      /* We cannot build a noexcept-spec right away because this will check
	 that expr is a constexpr.  */
      if (!return_cond)
	return build_noexcept_spec (expr, tf_warning_or_error);
      else
	return expr;
    }
  else
    return NULL_TREE;
}
/* Parse an (optional) exception-specification.

   exception-specification:
     throw ( type-id-list [opt] )

   Returns a TREE_LIST representing the exception-specification.  The
   TREE_VALUE of each node is a type.  A C++0x noexcept-specification,
   if present, is accepted instead and its representation returned.
   Returns NULL_TREE if neither form is present.  */

static tree
cp_parser_exception_specification_opt (cp_parser* parser)
{
  cp_token *token;
  tree type_id_list;
  const char *saved_message;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* Is it a noexcept-specification?  */
  type_id_list = cp_parser_noexcept_specification_opt(parser, true, NULL,
						      false);
  if (type_id_list != NULL_TREE)
    return type_id_list;

  /* If it's not `throw', then there's no exception-specification.  */
  if (!cp_parser_is_keyword (token, RID_THROW))
    return NULL_TREE;

#if 0
  /* Enable this once a lot of code has transitioned to noexcept?  */
  if (cxx_dialect == cxx0x && !in_system_header)
    warning (OPT_Wdeprecated, "dynamic exception specifications are "
	     "deprecated in C++0x; use %<noexcept%> instead");
#endif

  /* Consume the `throw'.  */
  cp_lexer_consume_token (parser->lexer);

  /* Look for the `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's not a `)', then there is a type-id-list.  */
  if (token->type != CPP_CLOSE_PAREN)
    {
      /* Types may not be defined in an exception-specification.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= G_("types may not be defined in an exception-specification");
      /* Parse the type-id-list.  */
      type_id_list = cp_parser_type_id_list (parser);
      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;
    }
  else
    /* `throw ()' promises that no exception is thrown; represented by
       the distinguished empty list.  */
    type_id_list = empty_except_spec;

  /* Look for the `)'.  */
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return type_id_list;
}
/* Parse an (optional) type-id-list.

   type-id-list:
     type-id ... [opt]
     type-id-list , type-id ... [opt]

   Returns a TREE_LIST.  The TREE_VALUE of each node is a TYPE,
   in the order that the types were presented.  */

static tree
cp_parser_type_id_list (cp_parser* parser)
{
  tree result = NULL_TREE;

  for (;;)
    {
      tree ty;

      /* Parse one type-id.  */
      ty = cp_parser_type_id (parser);

      /* An optional trailing `...' turns it into a pack expansion.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'.  */
	  cp_lexer_consume_token (parser->lexer);
	  ty = make_pack_expansion (ty);
	}

      /* Record the type; add_exception_specifier diagnoses invalid
	 entries itself.  */
      result = add_exception_specifier (result, ty, /*complain=*/1);

      /* The list continues only at a `,'.  */
      if (!cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }

  /* The list was consed up in reverse; restore source order.  */
  return nreverse (result);
}
/* Parse a try-block.

   try-block:
     try compound-statement handler-seq

   Returns the TRY_BLOCK created for the statement.  */

static tree
cp_parser_try_block (cp_parser* parser)
{
  tree block;

  /* The statement must begin with `try'.  */
  cp_parser_require_keyword (parser, RID_TRY, RT_TRY);
  block = begin_try_block ();
  /* Parse the guarded compound-statement...  */
  cp_parser_compound_statement (parser, NULL, true, false);
  finish_try_block (block);
  /* ...followed by the catch handlers.  */
  cp_parser_handler_seq (parser);
  finish_handler_sequence (block);

  return block;
}
/* Parse a function-try-block.

   function-try-block:
     try ctor-initializer [opt] function-body handler-seq

   Returns true if the function-body contained a ctor-initializer,
   false otherwise (including when no `try' keyword was found).  */

static bool
cp_parser_function_try_block (cp_parser* parser)
{
  tree body;
  tree try_stmt;
  bool saw_ctor_init;

  /* Look for the `try' keyword.  */
  if (!cp_parser_require_keyword (parser, RID_TRY, RT_TRY))
    return false;
  /* Let the rest of the front end know where we are.  */
  try_stmt = begin_function_try_block (&body);
  /* Parse the function-body.  */
  saw_ctor_init = cp_parser_ctor_initializer_opt_and_function_body (parser);
  /* We're done with the `try' part.  */
  finish_function_try_block (try_stmt);
  /* Parse the handlers.  */
  cp_parser_handler_seq (parser);
  /* We're done with the handlers.  */
  finish_function_handler_sequence (try_stmt, body);

  return saw_ctor_init;
}
/* Parse a handler-seq.

   handler-seq:
     handler handler-seq [opt]  */

static void
cp_parser_handler_seq (cp_parser* parser)
{
  /* At least one handler is required; keep parsing handlers for as
     long as another `catch' keyword follows the one just parsed.  */
  do
    cp_parser_handler (parser);
  while (cp_parser_is_keyword (cp_lexer_peek_token (parser->lexer),
			       RID_CATCH));
}
/* Parse a handler.

   handler:
     catch ( exception-declaration ) compound-statement  */

static void
cp_parser_handler (cp_parser* parser)
{
  tree stmt;
  tree decl;

  /* The handler starts with `catch ('.  */
  cp_parser_require_keyword (parser, RID_CATCH, RT_CATCH);
  stmt = begin_handler ();
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Parse the exception-declaration and attach it to the handler.  */
  decl = cp_parser_exception_declaration (parser);
  finish_handler_parms (decl, stmt);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
  /* Parse the handler body.  */
  cp_parser_compound_statement (parser, NULL, false, false);
  finish_handler (stmt);
}
/* Parse an exception-declaration.

   exception-declaration:
     type-specifier-seq declarator
     type-specifier-seq abstract-declarator
     type-specifier-seq
     ...

   Returns a VAR_DECL for the declaration, or NULL_TREE if the
   ellipsis variant is used.  Returns ERROR_MARK_NODE if no
   type-specifiers were present.  */

static tree
cp_parser_exception_declaration (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifiers;
  cp_declarator *declarator;
  const char *saved_message;

  /* If it's an ellipsis, it's easy to handle.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      return NULL_TREE;
    }

  /* Types may not be defined in exception-declarations.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in exception-declarations");

  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/true,
				/*is_trailing_return=*/false,
				&type_specifiers);
  /* If it's a `)', then there is no declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
    declarator = NULL;
  else
    /* Either a named or an abstract declarator is acceptable here.  */
    declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_EITHER,
				       /*ctor_dtor_or_conv_p=*/NULL,
				       /*parenthesized_p=*/NULL,
				       /*member_p=*/false);

  /* Restore the saved message; the declarator parse above also ran
     under the forbidden-definition message.  */
  parser->type_definition_forbidden_message = saved_message;

  if (!type_specifiers.any_specifiers_p)
    return error_mark_node;

  return grokdeclarator (declarator, &type_specifiers, CATCHPARM, 1, NULL);
}
/* Parse a throw-expression.

   throw-expression:
     throw assignment-expression [opt]

   Returns a THROW_EXPR representing the throw-expression.  */

static tree
cp_parser_throw_expression (cp_parser* parser)
{
  tree expr;
  cp_token* token;

  cp_parser_require_keyword (parser, RID_THROW, RT_THROW);
  token = cp_lexer_peek_token (parser->lexer);
  /* Figure out whether or not there is an assignment-expression
     following the "throw" keyword: any token that can legitimately
     terminate or follow a full expression means a rethrow.  */
  switch (token->type)
    {
    case CPP_COMMA:
    case CPP_SEMICOLON:
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_SQUARE:
    case CPP_CLOSE_BRACE:
    case CPP_COLON:
      expr = NULL_TREE;
      break;
    default:
      expr = cp_parser_assignment_expression (parser,
					      /*cast_p=*/false, NULL);
      break;
    }

  return build_throw (expr);
}
/* GNU Extensions */
/* Parse an (optional) asm-specification.

   asm-specification:
     asm ( string-literal )

   If the asm-specification is present, returns a STRING_CST
   corresponding to the string-literal.  Otherwise, returns
   NULL_TREE.  */

static tree
cp_parser_asm_specification_opt (cp_parser* parser)
{
  tree spec;

  /* If the next token isn't the `asm' keyword, then there's no
     asm-specification.  */
  if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_ASM))
    return NULL_TREE;

  /* Consume the `asm' token.  */
  cp_lexer_consume_token (parser->lexer);
  /* Look for the `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Look for the string-literal.  */
  spec = cp_parser_string_literal (parser, false, false);
  /* Look for the `)'.  */
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return spec;
}
/* Parse an asm-operand-list.

   asm-operand-list:
     asm-operand
     asm-operand-list , asm-operand

   asm-operand:
     string-literal ( expression )
     [ string-literal ] string-literal ( expression )

   Returns a TREE_LIST representing the operands.  The TREE_VALUE of
   each node is the expression.  The TREE_PURPOSE is itself a
   TREE_LIST whose TREE_PURPOSE is a STRING_CST for the bracketed
   string-literal (or NULL_TREE if not present) and whose TREE_VALUE
   is a STRING_CST for the string literal before the parenthesis.  Returns
   ERROR_MARK_NODE if any of the operands are invalid.  */

static tree
cp_parser_asm_operand_list (cp_parser* parser)
{
  tree asm_operands = NULL_TREE;
  bool invalid_operands = false;

  while (true)
    {
      tree string_literal;
      tree expression;
      tree name;

      /* The optional `[ identifier ]' prefix gives the operand a
	 symbolic name.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  /* Consume the `[' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Read the operand name.  */
	  name = cp_parser_identifier (parser);
	  /* The middle end expects the name as a STRING_CST, not an
	     IDENTIFIER_NODE.  */
	  if (name != error_mark_node)
	    name = build_string (IDENTIFIER_LENGTH (name),
				 IDENTIFIER_POINTER (name));
	  /* Look for the closing `]'.  */
	  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
	}
      else
	name = NULL_TREE;
      /* Look for the string-literal (the constraint).  */
      string_literal = cp_parser_string_literal (parser, false, false);
      /* Look for the `('.  */
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
      /* Parse the expression.  */
      expression = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      /* Look for the `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

      /* Note an invalid component, but keep parsing the remaining
	 operands so later errors are still reported.  */
      if (name == error_mark_node
	  || string_literal == error_mark_node
	  || expression == error_mark_node)
	invalid_operands = true;

      /* Add this operand to the list.  */
      asm_operands = tree_cons (build_tree_list (name, string_literal),
				expression,
				asm_operands);
      /* If the next token is not a `,', there are no more
	 operands.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return invalid_operands ? error_mark_node : nreverse (asm_operands);
}
/* Parse an asm-clobber-list.

   asm-clobber-list:
     string-literal
     asm-clobber-list , string-literal

   Returns a TREE_LIST; the TREE_VALUE of each node is a STRING_CST.
   The list is consed up front-first and is not reversed, so the nodes
   appear in reverse of source order.  */

static tree
cp_parser_asm_clobber_list (cp_parser* parser)
{
  tree clobber_list = NULL_TREE;

  for (;;)
    {
      tree clobber;

      /* Look for the string literal.  */
      clobber = cp_parser_string_literal (parser, false, false);
      /* Chain it onto the front of the list.  */
      clobber_list = tree_cons (NULL_TREE, clobber, clobber_list);

      /* The list continues only at a `,'.  */
      if (!cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return clobber_list;
}
/* Parse an asm-label-list.

   asm-label-list:
     identifier
     asm-label-list , identifier

   Returns a TREE_LIST, indicating the labels in the order that they
   appeared.  The TREE_VALUE of each node is a label (LABEL_DECL);
   the TREE_PURPOSE is a STRING_CST with the label's name.
   Identifiers that do not resolve to a LABEL_DECL are silently
   omitted from the list (an error has already been issued for an
   invalid identifier).  */

static tree
cp_parser_asm_label_list (cp_parser* parser)
{
  tree labels = NULL_TREE;

  while (true)
    {
      tree identifier, label, name;

      /* Look for the identifier.  */
      identifier = cp_parser_identifier (parser);
      if (!error_operand_p (identifier))
	{
	  label = lookup_label (identifier);
	  if (TREE_CODE (label) == LABEL_DECL)
	    {
	      /* Referencing a label from asm counts as a use and as a
		 goto for flow purposes.  */
	      TREE_USED (label) = 1;
	      check_goto (label);
	      name = build_string (IDENTIFIER_LENGTH (identifier),
				   IDENTIFIER_POINTER (identifier));
	      labels = tree_cons (name, label, labels);
	    }
	}
      /* If the next token is not a `,', then the list is
	 complete.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return nreverse (labels);
}
/* Parse an (optional) series of attributes.

   attributes:
     attributes attribute

   attribute:
     __attribute__ (( attribute-list [opt] ))

   The return value is as for cp_parser_attribute_list; the lists
   from successive __attribute__ clauses are chained together.  */

static tree
cp_parser_attributes_opt (cp_parser* parser)
{
  tree result = NULL_TREE;

  /* Each iteration handles one `__attribute__ ((...))' clause.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
    {
      tree attr_list;

      /* Consume the `__attribute__' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the two `(' tokens.  */
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

      /* An immediate `)' means the attribute-list is empty.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
	attr_list = NULL;
      else
	/* Parse the attribute-list.  */
	attr_list = cp_parser_attribute_list (parser);

      /* Look for the two `)' tokens.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

      /* Add these new attributes to the list.  */
      result = chainon (result, attr_list);
    }

  return result;
}
/* Parse an attribute-list.

   attribute-list:
     attribute
     attribute-list , attribute

   attribute:
     identifier
     identifier ( identifier )
     identifier ( identifier , expression-list )
     identifier ( expression-list )

   Returns a TREE_LIST, or NULL_TREE on error.  Each node corresponds
   to an attribute.  The TREE_PURPOSE of each node is the identifier
   indicating which attribute is in use.  The TREE_VALUE represents
   the arguments, if any.  String literals inside attribute arguments
   are not translated (translate_strings_p is suspended for the whole
   list).  */

static tree
cp_parser_attribute_list (cp_parser* parser)
{
  tree attribute_list = NULL_TREE;
  bool save_translate_strings_p = parser->translate_strings_p;

  /* Attribute-argument strings must not be translated.  */
  parser->translate_strings_p = false;
  while (true)
    {
      cp_token *token;
      tree identifier;
      tree attribute;

      /* Look for the identifier.  We also allow keywords here; for
	 example `__attribute__ ((const))' is legal.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_NAME
	  || token->type == CPP_KEYWORD)
	{
	  tree arguments = NULL_TREE;

	  /* Consume the token.  */
	  token = cp_lexer_consume_token (parser->lexer);

	  /* Save away the identifier that indicates which attribute
	     this is.  */
	  identifier = (token->type == CPP_KEYWORD)
	    /* For keywords, use the canonical spelling, not the
	       parsed identifier.  */
	    ? ridpointers[(int) token->keyword]
	    : token->u.value;

	  attribute = build_tree_list (identifier, NULL_TREE);

	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  /* If it's an `(', then parse the attribute arguments.  */
	  if (token->type == CPP_OPEN_PAREN)
	    {
	      VEC(tree,gc) *vec;
	      /* Some attributes (e.g. mode, format) take a plain
		 identifier as their first argument rather than an
		 expression.  */
	      int attr_flag = (attribute_takes_identifier_p (identifier)
			       ? id_attr : normal_attr);
	      vec = cp_parser_parenthesized_expression_list
		    (parser, attr_flag, /*cast_p=*/false,
		     /*allow_expansion_p=*/false,
		     /*non_constant_p=*/NULL);
	      if (vec == NULL)
		arguments = error_mark_node;
	      else
		{
		  arguments = build_tree_list_vec (vec);
		  release_tree_vector (vec);
		}
	      /* Save the arguments away.  */
	      TREE_VALUE (attribute) = arguments;
	    }

	  /* Attributes with invalid arguments are dropped from the
	     list, but parsing continues.  */
	  if (arguments != error_mark_node)
	    {
	      /* Add this attribute to the list.  */
	      TREE_CHAIN (attribute) = attribute_list;
	      attribute_list = attribute;
	    }

	  token = cp_lexer_peek_token (parser->lexer);
	}
      /* Now, look for more attributes.  If the next token isn't a
	 `,', we're done.  (If the branch above was not taken, TOKEN
	 still refers to the unconsumed token peeked at the top of the
	 loop.)  */
      if (token->type != CPP_COMMA)
	break;

      /* Consume the comma and keep going.  */
      cp_lexer_consume_token (parser->lexer);
    }
  parser->translate_strings_p = save_translate_strings_p;

  /* We built up the list in reverse order.  */
  return nreverse (attribute_list);
}
/* Parse an optional `__extension__' keyword.  Returns TRUE if it is
   present, and FALSE otherwise.  *SAVED_PEDANTIC is set to the
   current value of the PEDANTIC flag, regardless of whether or not
   the `__extension__' keyword is present.  The caller is responsible
   for restoring the value of the PEDANTIC flag.  */

static bool
cp_parser_extension_opt (cp_parser* parser, int* saved_pedantic)
{
  /* Record the current pedantry level unconditionally.  */
  *saved_pedantic = pedantic;

  if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_EXTENSION))
    return false;

  /* Consume the `__extension__' token and suppress pedantic
     diagnostics while it is in effect.  */
  cp_lexer_consume_token (parser->lexer);
  pedantic = 0;
  return true;
}
/* Parse a GNU local label declaration.

   label-declaration:
     __label__ label-declarator-seq ;

   label-declarator-seq:
     identifier , label-declarator-seq
     identifier  */

static void
cp_parser_label_declaration (cp_parser* parser)
{
  /* The declaration must begin with `__label__'.  */
  cp_parser_require_keyword (parser, RID_LABEL, RT_LABEL);

  for (;;)
    {
      /* Each declarator is a bare identifier.  */
      tree name = cp_parser_identifier (parser);
      if (name == error_mark_node)
	break;
      /* Register the identifier as a local label.  */
      finish_label_decl (name);
      /* A `;' terminates the sequence; otherwise a `,' must separate
	 consecutive declarators.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	break;
      cp_parser_require (parser, CPP_COMMA, RT_COMMA);
    }

  /* Consume the terminating `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
}
/* Support Functions */

/* Looks up NAME in the current scope, as given by PARSER->SCOPE.
   NAME should have one of the representations used for an
   id-expression.  If NAME is the ERROR_MARK_NODE, the ERROR_MARK_NODE
   is returned.  If PARSER->SCOPE is a dependent type, then a
   SCOPE_REF is returned.

   If NAME is a TEMPLATE_ID_EXPR, then it will be immediately
   returned; the name was already resolved when the TEMPLATE_ID_EXPR
   was formed.  Abstractly, such entities should not be passed to this
   function, because they do not need to be looked up, but it is
   simpler to check for this special case here, rather than at the
   call-sites.

   In cases not explicitly covered above, this function returns a
   DECL, OVERLOAD, or baselink representing the result of the lookup.
   If there was no entity with the indicated NAME, the ERROR_MARK_NODE
   is returned.

   If TAG_TYPE is not NONE_TYPE, it indicates an explicit type keyword
   (e.g., "struct") that was used.  In that case bindings that do not
   refer to types are ignored.

   If IS_TEMPLATE is TRUE, bindings that do not refer to templates are
   ignored.

   If IS_NAMESPACE is TRUE, bindings that do not refer to namespaces
   are ignored.

   If CHECK_DEPENDENCY is TRUE, names are not looked up in dependent
   types.

   If AMBIGUOUS_DECLS is non-NULL, *AMBIGUOUS_DECLS is set to a
   TREE_LIST of candidates if name-lookup results in an ambiguity, and
   NULL_TREE otherwise.  */

static tree
cp_parser_lookup_name (cp_parser *parser, tree name,
		       enum tag_types tag_type,
		       bool is_template,
		       bool is_namespace,
		       bool check_dependency,
		       tree *ambiguous_decls,
		       location_t name_location)
{
  int flags = 0;
  tree decl;
  tree object_type = parser->context->object_type;

  /* Only complain about lookup failures when the parse is committed;
     a tentative parse may legitimately fail and be rolled back.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
    flags |= LOOKUP_COMPLAIN;

  /* Assume that the lookup will be unambiguous.  */
  if (ambiguous_decls)
    *ambiguous_decls = NULL_TREE;

  /* Now that we have looked up the name, the OBJECT_TYPE (if any) is
     no longer valid.  Note that if we are parsing tentatively, and
     the parse fails, OBJECT_TYPE will be automatically restored.  */
  parser->context->object_type = NULL_TREE;

  if (name == error_mark_node)
    return error_mark_node;

  /* A template-id has already been resolved; there is no lookup to
     do.  */
  if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
    return name;
  if (BASELINK_P (name))
    {
      gcc_assert (TREE_CODE (BASELINK_FUNCTIONS (name))
		  == TEMPLATE_ID_EXPR);
      return name;
    }

  /* A BIT_NOT_EXPR is used to represent a destructor.  By this point,
     it should already have been checked to make sure that the name
     used matches the type being destroyed.  */
  if (TREE_CODE (name) == BIT_NOT_EXPR)
    {
      tree type;

      /* Figure out to which type this destructor applies: the
	 qualifying scope, if any, otherwise the object expression's
	 type, otherwise the enclosing class.  */
      if (parser->scope)
	type = parser->scope;
      else if (object_type)
	type = object_type;
      else
	type = current_class_type;
      /* If that's not a class type, there is no destructor.  */
      if (!type || !CLASS_TYPE_P (type))
	return error_mark_node;
      /* The destructor may not have been declared yet; force its
	 implicit declaration before looking it up.  */
      if (CLASSTYPE_LAZY_DESTRUCTOR (type))
	lazily_declare_fn (sfk_destructor, type);
      if (!CLASSTYPE_DESTRUCTORS (type))
	return error_mark_node;
      /* If it was a class type, return the destructor.  */
      return CLASSTYPE_DESTRUCTORS (type);
    }

  /* By this point, the NAME should be an ordinary identifier.  If
     the id-expression was a qualified name, the qualifying scope is
     stored in PARSER->SCOPE at this point.  */
  gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);

  /* Perform the lookup.  */
  if (parser->scope)
    {
      bool dependent_p;

      if (parser->scope == error_mark_node)
	return error_mark_node;

      /* If the SCOPE is dependent, the lookup must be deferred until
	 the template is instantiated -- unless we are explicitly
	 looking up names in uninstantiated templates.  Even then, we
	 cannot look up the name if the scope is not a class type; it
	 might, for example, be a template type parameter.  */
      dependent_p = (TYPE_P (parser->scope)
		     && dependent_scope_p (parser->scope));
      if ((check_dependency || !CLASS_TYPE_P (parser->scope))
	  && dependent_p)
	/* Defer lookup.  */
	decl = error_mark_node;
      else
	{
	  tree pushed_scope = NULL_TREE;

	  /* If PARSER->SCOPE is a dependent type, then it must be a
	     class type, and we must not be checking dependencies;
	     otherwise, we would have processed this lookup above.  So
	     that PARSER->SCOPE is not considered a dependent base by
	     lookup_member, we must enter the scope here.  */
	  if (dependent_p)
	    pushed_scope = push_scope (parser->scope);

	  /* If the PARSER->SCOPE is a template specialization, it
	     may be instantiated during name lookup.  In that case,
	     errors may be issued.  Even if we rollback the current
	     tentative parse, those errors are valid.  */
	  decl = lookup_qualified_name (parser->scope, name,
					tag_type != none_type,
					/*complain=*/true);

	  /* 3.4.3.1: In a lookup in which the constructor is an acceptable
	     lookup result and the nested-name-specifier nominates a class C:

	       * if the name specified after the nested-name-specifier, when
	       looked up in C, is the injected-class-name of C (Clause 9), or

	       * if the name specified after the nested-name-specifier is the
	       same as the identifier or the simple-template-id's template-
	       name in the last component of the nested-name-specifier,

	     the name is instead considered to name the constructor of
	     class C.  [ Note: for example, the constructor is not an
	     acceptable lookup result in an elaborated-type-specifier so
	     the constructor would not be used in place of the
	     injected-class-name. --end note ] Such a constructor name
	     shall be used only in the declarator-id of a declaration that
	     names a constructor or in a using-declaration.  */
	  if (tag_type == none_type
	      && DECL_SELF_REFERENCE_P (decl)
	      && same_type_p (DECL_CONTEXT (decl), parser->scope))
	    decl = lookup_qualified_name (parser->scope, ctor_identifier,
					  tag_type != none_type,
					  /*complain=*/true);

	  /* If we have a single function from a using decl, pull it out.  */
	  if (TREE_CODE (decl) == OVERLOAD
	      && !really_overloaded_fn (decl))
	    decl = OVL_FUNCTION (decl);

	  if (pushed_scope)
	    pop_scope (pushed_scope);
	}

      /* If the scope is a dependent type and either we deferred lookup or
	 we did lookup but didn't find the name, remember the name.  */
      if (decl == error_mark_node && TYPE_P (parser->scope)
	  && dependent_type_p (parser->scope))
	{
	  if (tag_type)
	    {
	      tree type;

	      /* The resolution to Core Issue 180 says that `struct
		 A::B' should be considered a type-name, even if `A'
		 is dependent.  */
	      type = make_typename_type (parser->scope, name, tag_type,
					 /*complain=*/tf_error);
	      decl = TYPE_NAME (type);
	    }
	  else if (is_template
		   && (cp_parser_next_token_ends_template_argument_p (parser)
		       || cp_lexer_next_token_is (parser->lexer,
						  CPP_CLOSE_PAREN)))
	    decl = make_unbound_class_template (parser->scope,
						name, NULL_TREE,
						/*complain=*/tf_error);
	  else
	    decl = build_qualified_name (/*type=*/NULL_TREE,
					 parser->scope, name,
					 is_template);
	}
      parser->qualifying_scope = parser->scope;
      parser->object_scope = NULL_TREE;
    }
  else if (object_type)
    {
      tree object_decl = NULL_TREE;
      /* Look up the name in the scope of the OBJECT_TYPE, unless the
	 OBJECT_TYPE is not a class.  */
      if (CLASS_TYPE_P (object_type))
	/* If the OBJECT_TYPE is a template specialization, it may
	   be instantiated during name lookup.  In that case, errors
	   may be issued.  Even if we rollback the current tentative
	   parse, those errors are valid.  */
	object_decl = lookup_member (object_type,
				     name,
				     /*protect=*/0,
				     tag_type != none_type,
				     tf_warning_or_error);
      /* Look it up in the enclosing context, too.  */
      decl = lookup_name_real (name, tag_type != none_type,
			       /*nonclass=*/0,
			       /*block_p=*/true, is_namespace, flags);
      parser->object_scope = object_type;
      parser->qualifying_scope = NULL_TREE;
      /* The member lookup, when successful, takes precedence over the
	 lookup in the enclosing context.  */
      if (object_decl)
	decl = object_decl;
    }
  else
    {
      /* Unqualified lookup in the current scope.  */
      decl = lookup_name_real (name, tag_type != none_type,
			       /*nonclass=*/0,
			       /*block_p=*/true, is_namespace, flags);
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  /* If the lookup failed, let our caller know.  */
  if (!decl || decl == error_mark_node)
    return error_mark_node;

  /* Pull out the template from an injected-class-name (or multiple).  */
  if (is_template)
    decl = maybe_get_template_decl_from_type_decl (decl);

  /* If it's a TREE_LIST, the result of the lookup was ambiguous.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      if (ambiguous_decls)
	*ambiguous_decls = decl;
      /* The error message we have to print is too complicated for
	 cp_parser_error, so we incorporate its actions directly.  */
      if (!cp_parser_simulate_error (parser))
	{
	  error_at (name_location, "reference to %qD is ambiguous",
		    name);
	  print_candidates (decl);
	}
      return error_mark_node;
    }

  gcc_assert (DECL_P (decl)
	      || TREE_CODE (decl) == OVERLOAD
	      || TREE_CODE (decl) == SCOPE_REF
	      || TREE_CODE (decl) == UNBOUND_CLASS_TEMPLATE
	      || BASELINK_P (decl));

  /* If we have resolved the name of a member declaration, check to
     see if the declaration is accessible.  When the name resolves to
     set of overloaded functions, accessibility is checked when
     overload resolution is done.

     During an explicit instantiation, access is not checked at all,
     as per [temp.explicit].  */
  if (DECL_P (decl))
    check_accessibility_of_qualified_id (decl, object_type, parser->scope);

  maybe_record_typedef_use (decl);

  return decl;
}
/* Like cp_parser_lookup_name, but for use in the typical case where
   CHECK_ACCESS is TRUE, IS_TYPE is FALSE, IS_TEMPLATE is FALSE,
   IS_NAMESPACE is FALSE, and CHECK_DEPENDENCY is TRUE.  */

static tree
cp_parser_lookup_name_simple (cp_parser* parser, tree name, location_t location)
{
  /* Delegate to the general lookup routine with the default flags and
     no interest in ambiguity details.  */
  tree result
    = cp_parser_lookup_name (parser, name, none_type,
			     /*is_template=*/false,
			     /*is_namespace=*/false,
			     /*check_dependency=*/true,
			     /*ambiguous_decls=*/NULL,
			     location);
  return result;
}
/* If DECL is a TEMPLATE_DECL that can be treated like a TYPE_DECL in
   the current context, return the TYPE_DECL.  If TAG_NAME_P is
   true, the DECL indicates the class being defined in a class-head,
   or declared in an elaborated-type-specifier.

   Otherwise, return DECL.  */

static tree
cp_parser_maybe_treat_template_as_class (tree decl, bool tag_name_p)
{
  /* If the TEMPLATE_DECL is being declared as part of a class-head,
     the translation from TEMPLATE_DECL to TYPE_DECL occurs:

       struct A {
	 template <typename T> struct B;
       };

       template <typename T> struct A::B {};

     Similarly, in an elaborated-type-specifier:

       namespace N { struct X{}; }

       struct A {
	 template <typename T> friend struct N::X;
       };

     However, if the DECL refers to a class type, and we are in
     the scope of the class, then the name lookup automatically
     finds the TYPE_DECL created by build_self_reference rather
     than a TEMPLATE_DECL.  For example, in:

       template <class T> struct S {
	 S s;
       };

     there is no need to handle such case.  */

  if (!tag_name_p || !DECL_CLASS_TEMPLATE_P (decl))
    return decl;

  /* Use the class template's underlying TYPE_DECL instead.  */
  return DECL_TEMPLATE_RESULT (decl);
}
/* If too many, or too few, template-parameter lists apply to the
   declarator, issue an error message.  Returns TRUE if all went well,
   and FALSE otherwise.  */

static bool
cp_parser_check_declarator_template_parameters (cp_parser* parser,
						cp_declarator *declarator,
						location_t declarator_location)
{
  /* Count how many enclosing template classes contribute a required
     template-parameter-list; start from zero.  */
  unsigned n_templates = 0;

  switch (declarator->kind)
    {
    case cdk_id:
      if (declarator->u.id.qualifying_scope)
	{
	  tree scope = declarator->u.id.qualifying_scope;

	  /* Walk outward through the qualifying class scopes, counting
	     the primary templates among them.  */
	  for (; scope && CLASS_TYPE_P (scope); scope = TYPE_CONTEXT (scope))
	    {
	      /* You're supposed to have one `template <...>' for every
		 template class, but you don't need one for a full
		 specialization.  For example:

		   template <class T> struct S{};
		   template <> struct S<int> { void f(); };
		   void S<int>::f () {}

		 is correct; there shouldn't be a `template <>' for
		 the definition of `S<int>::f'.  */
	      if (!CLASSTYPE_TEMPLATE_INFO (scope))
		/* If SCOPE does not have template information of any
		   kind, then it is not a template, nor is it nested
		   within a template.  */
		break;
	      if (explicit_class_specialization_p (scope))
		break;
	      if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope)))
		++n_templates;
	    }
	}
      else if (TREE_CODE (declarator->u.id.unqualified_name)
	       == TEMPLATE_ID_EXPR)
	/* If the DECLARATOR has the form `X<y>' then it uses one
	   additional level of template parameters.  */
	++n_templates;

      return cp_parser_check_template_parameters
	(parser, n_templates, declarator_location, declarator);

    case cdk_function:
    case cdk_array:
    case cdk_pointer:
    case cdk_reference:
    case cdk_ptrmem:
      /* These declarator kinds merely wrap an inner declarator; check
	 that one instead.  */
      return (cp_parser_check_declarator_template_parameters
	      (parser, declarator->declarator, declarator_location));

    case cdk_error:
      return true;

    default:
      gcc_unreachable ();
    }
  return false;
}
/* NUM_TEMPLATES were used in the current declaration.  If that is
   invalid, return FALSE and issue an error messages.  Otherwise,
   return TRUE.  If DECLARATOR is non-NULL, then we are checking a
   declarator and we can print more accurate diagnostics.  */

static bool
cp_parser_check_template_parameters (cp_parser* parser,
				     unsigned num_templates,
				     location_t location,
				     cp_declarator *declarator)
{
  unsigned n_lists = parser->num_template_parameter_lists;

  /* An exact match is fine; one extra list means a member template,
     which is also fine.  */
  if (n_lists == num_templates || n_lists == num_templates + 1)
    return true;

  /* More than one extra parameter list: something like

       template <class T> template <class U> void S::f();  */
  if (n_lists > num_templates)
    {
      error_at (location, "too many template-parameter-lists");
      return false;
    }

  /* Otherwise there are more template classes than parameter lists,
     as in

       template <class T> void S<T>::R<T>::f ();

     Use the declarator, when available, for a more precise message.  */
  if (!declarator)
    error_at (location, "too few template-parameter-lists");
  else if (!current_function_decl)
    error_at (location, "specializing member %<%T::%E%> "
	      "requires %<template<>%> syntax",
	      declarator->u.id.qualifying_scope,
	      declarator->u.id.unqualified_name);
  else
    error_at (location, "invalid declaration of %<%T::%E%>",
	      declarator->u.id.qualifying_scope,
	      declarator->u.id.unqualified_name);
  return false;
}
/* Parse an optional `::' token indicating that the following name is
   from the global namespace.  If so, PARSER->SCOPE is set to the
   GLOBAL_NAMESPACE.  Otherwise, PARSER->SCOPE is set to NULL_TREE,
   unless CURRENT_SCOPE_VALID_P is TRUE, in which case it is left alone.
   Returns the new value of PARSER->SCOPE, if the `::' token is
   present, and NULL_TREE otherwise.  */

static tree
cp_parser_global_scope_opt (cp_parser* parser, bool current_scope_valid_p)
{
  /* A leading `::' means the lookup starts from the global
     namespace rather than the current location.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    {
      /* Consume the `::' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Record where subsequent lookups should begin.  */
      parser->scope = global_namespace;
      parser->qualifying_scope = global_namespace;
      parser->object_scope = NULL_TREE;
      return parser->scope;
    }

  /* No `::'; clear the scope state unless the caller wants the
     current scope preserved.  */
  if (!current_scope_valid_p)
    {
      parser->scope = NULL_TREE;
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  return NULL_TREE;
}
/* Returns TRUE if the upcoming token sequence is the start of a
   constructor declarator.  If FRIEND_P is true, the declarator is
   preceded by the `friend' specifier.  The whole check is performed
   inside a tentative parse that is always aborted, so no tokens are
   actually consumed.  */

static bool
cp_parser_constructor_declarator_p (cp_parser *parser, bool friend_p)
{
  bool constructor_p;
  tree nested_name_specifier;
  cp_token *next_token;

  /* The common case is that this is not a constructor declarator, so
     try to avoid doing lots of work if at all possible.  It's not
     valid declare a constructor at function scope.  */
  if (parser->in_function_body)
    return false;
  /* And only certain tokens can begin a constructor declarator.  */
  next_token = cp_lexer_peek_token (parser->lexer);
  if (next_token->type != CPP_NAME
      && next_token->type != CPP_SCOPE
      && next_token->type != CPP_NESTED_NAME_SPECIFIER
      && next_token->type != CPP_TEMPLATE_ID)
    return false;

  /* Parse tentatively; we are going to roll back all of the tokens
     consumed here.  */
  cp_parser_parse_tentatively (parser);
  /* Assume that we are looking at a constructor declarator.  */
  constructor_p = true;

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser,
			      /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  */
  nested_name_specifier
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    /*check_dependency_p=*/false,
					    /*type_p=*/false,
					    /*is_declaration=*/false));
  /* Outside of a class-specifier, there must be a
     nested-name-specifier.  */
  if (!nested_name_specifier &&
      (!at_class_scope_p () || !TYPE_BEING_DEFINED (current_class_type)
       || friend_p))
    constructor_p = false;
  else if (nested_name_specifier == error_mark_node)
    constructor_p = false;

  /* If we have a class scope, this is easy; DR 147 says that S::S always
     names the constructor, and no other qualified name could.  */
  if (constructor_p && nested_name_specifier
      && CLASS_TYPE_P (nested_name_specifier))
    {
      tree id = cp_parser_unqualified_id (parser,
					  /*template_keyword_p=*/false,
					  /*check_dependency_p=*/false,
					  /*declarator_p=*/true,
					  /*optional_p=*/false);
      if (is_overloaded_fn (id))
	id = DECL_NAME (get_first_fn (id));
      /* The unqualified name must name the class itself for this to be
	 a constructor declarator.  */
      if (!constructor_name_p (id, nested_name_specifier))
	constructor_p = false;
    }
  /* If we still think that this might be a constructor-declarator,
     look for a class-name.  */
  else if (constructor_p)
    {
      /* If we have:

	   template <typename T> struct S {
	     S();
	   };

	 we must recognize that the nested `S' names a class.  */
      tree type_decl;
      type_decl = cp_parser_class_name (parser,
					/*typename_keyword_p=*/false,
					/*template_keyword_p=*/false,
					none_type,
					/*check_dependency_p=*/false,
					/*class_head_p=*/false,
					/*is_declaration=*/false);
      /* If there was no class-name, then this is not a constructor.  */
      constructor_p = !cp_parser_error_occurred (parser);

      /* If we're still considering a constructor, we have to see a `(',
	 to begin the parameter-declaration-clause, followed by either a
	 `)', an `...', or a decl-specifier.  We need to check for a
	 type-specifier to avoid being fooled into thinking that:

	   S (f) (int);

	 is a constructor.  (It is actually a function named `f' that
	 takes one parameter (of type `int') and returns a value of type
	 `S'.  */
      if (constructor_p
	  && !cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	constructor_p = false;

      if (constructor_p
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_ELLIPSIS)
	  /* A parameter declaration begins with a decl-specifier,
	     which is either the "attribute" keyword, a storage class
	     specifier, or (usually) a type-specifier.  */
	  && !cp_lexer_next_token_is_decl_specifier_keyword (parser->lexer))
	{
	  tree type;
	  tree pushed_scope = NULL_TREE;
	  unsigned saved_num_template_parameter_lists;

	  /* Names appearing in the type-specifier should be looked up
	     in the scope of the class.  */
	  if (current_class_type)
	    type = NULL_TREE;
	  else
	    {
	      type = TREE_TYPE (type_decl);
	      if (TREE_CODE (type) == TYPENAME_TYPE)
		{
		  /* Try to resolve the typename; if it stays
		     unresolved we cannot decide, so bail out.  */
		  type = resolve_typename_type (type,
						/*only_current_p=*/false);
		  if (TREE_CODE (type) == TYPENAME_TYPE)
		    {
		      cp_parser_abort_tentative_parse (parser);
		      return false;
		    }
		}
	      pushed_scope = push_scope (type);
	    }

	  /* Inside the constructor parameter list, surrounding
	     template-parameter-lists do not apply.  */
	  saved_num_template_parameter_lists
	    = parser->num_template_parameter_lists;
	  parser->num_template_parameter_lists = 0;

	  /* Look for the type-specifier.  */
	  cp_parser_type_specifier (parser,
				    CP_PARSER_FLAGS_NONE,
				    /*decl_specs=*/NULL,
				    /*is_declarator=*/true,
				    /*declares_class_or_enum=*/NULL,
				    /*is_cv_qualifier=*/NULL);

	  parser->num_template_parameter_lists
	    = saved_num_template_parameter_lists;

	  /* Leave the scope of the class.  */
	  if (pushed_scope)
	    pop_scope (pushed_scope);

	  /* If a type-specifier parsed cleanly, this cannot be a
	     constructor parameter starting with a plain name.  */
	  constructor_p = !cp_parser_error_occurred (parser);
	}
    }

  /* We did not really want to consume any tokens.  */
  cp_parser_abort_tentative_parse (parser);

  return constructor_p;
}
/* Parse the definition of the function given by the DECL_SPECIFIERS,
   ATTRIBUTES, and DECLARATOR.  The access checks have been deferred;
   they must be performed once we are in the scope of the function.

   Returns the function defined.  */

static tree
cp_parser_function_definition_from_specifiers_and_declarator
  (cp_parser* parser,
   cp_decl_specifier_seq *decl_specifiers,
   tree attributes,
   const cp_declarator *declarator)
{
  tree fn;
  bool success_p;

  /* Begin the function-definition.  */
  success_p = start_function (decl_specifiers, declarator, attributes);

  /* The things we're about to see are not directly qualified by any
     template headers we've seen thus far.  */
  reset_specialization ();

  /* If there were names looked up in the decl-specifier-seq that we
     did not check, check them now.  We must wait until we are in the
     scope of the function to perform the checks, since the function
     might be a friend.  */
  perform_deferred_access_checks ();

  if (!success_p)
    {
      /* start_function failed; skip the entire function body so the
	 parser can continue at the next top-level construct.  */
      cp_parser_skip_to_end_of_block_or_statement (parser);
      fn = error_mark_node;
    }
  else if (DECL_INITIAL (current_function_decl) != error_mark_node)
    {
      /* Seen already, skip it.  An error message has already been output.  */
      cp_parser_skip_to_end_of_block_or_statement (parser);
      fn = current_function_decl;
      current_function_decl = NULL_TREE;
      /* If this is a function from a class, pop the nested class.  */
      if (current_class_name)
	pop_nested_class ();
    }
  else
    {
      /* Attribute parse time to the inline or out-of-line function
	 timer, as appropriate.  */
      timevar_id_t tv;
      if (DECL_DECLARED_INLINE_P (current_function_decl))
	tv = TV_PARSE_INLINE;
      else
	tv = TV_PARSE_FUNC;
      timevar_push (tv);
      fn = cp_parser_function_definition_after_declarator (parser,
							   /*inline_p=*/false);
      timevar_pop (tv);
    }

  return fn;
}
/* Parse the part of a function-definition that follows the
   declarator.  INLINE_P is TRUE iff this function is an inline
   function defined within a class-specifier.

   Returns the function defined.  */

static tree
cp_parser_function_definition_after_declarator (cp_parser* parser,
						bool inline_p)
{
  tree fn;
  bool ctor_initializer_p = false;
  bool saved_in_unbraced_linkage_specification_p;
  bool saved_in_function_body;
  unsigned saved_num_template_parameter_lists;
  cp_token *token;

  saved_in_function_body = parser->in_function_body;
  parser->in_function_body = true;
  /* If the next token is `return', then the code may be trying to
     make use of the "named return value" extension that G++ used to
     support.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_RETURN))
    {
      /* Consume the `return' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the identifier that indicates what value is to be
	 returned.  */
      cp_parser_identifier (parser);
      /* Issue an error message.  */
      error_at (token->location,
		"named return values are no longer supported");
      /* Skip tokens until we reach the start of the function body.  */
      while (true)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  if (token->type == CPP_OPEN_BRACE
	      || token->type == CPP_EOF
	      || token->type == CPP_PRAGMA_EOL)
	    break;
	  cp_lexer_consume_token (parser->lexer);
	}
    }
  /* The `extern' in `extern "C" void f () { ... }' does not apply to
     anything declared inside `f'.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;
  /* Inside the function, surrounding template-parameter-lists do not
     apply.  */
  saved_num_template_parameter_lists
    = parser->num_template_parameter_lists;
  parser->num_template_parameter_lists = 0;

  start_lambda_scope (current_function_decl);

  /* If the next token is `try', `__transaction_atomic', or
     `__transaction_relaxed`, then we are looking at either function-try-block
     or function-transaction-block.  Note that all of these include the
     function-body.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRANSACTION_ATOMIC))
    ctor_initializer_p = cp_parser_function_transaction (parser,
	RID_TRANSACTION_ATOMIC);
  else if (cp_lexer_next_token_is_keyword (parser->lexer,
      RID_TRANSACTION_RELAXED))
    ctor_initializer_p = cp_parser_function_transaction (parser,
	RID_TRANSACTION_RELAXED);
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
    ctor_initializer_p = cp_parser_function_try_block (parser);
  else
    ctor_initializer_p
      = cp_parser_ctor_initializer_opt_and_function_body (parser);

  finish_lambda_scope ();

  /* Finish the function.  The flags word encodes whether a
     ctor-initializer was seen (bit 0) and whether this was an
     in-class inline definition (bit 1).  */
  fn = finish_function ((ctor_initializer_p ? 1 : 0) |
			(inline_p ? 2 : 0));
  /* Generate code for it, if necessary.  */
  expand_or_defer_fn (fn);
  /* Restore the saved values.  */
  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;
  parser->num_template_parameter_lists
    = saved_num_template_parameter_lists;
  parser->in_function_body = saved_in_function_body;

  return fn;
}
/* Parse a template-declaration, assuming that the `export' (and
   `extern') keywords, if present, has already been scanned.  MEMBER_P
   is as for cp_parser_template_declaration.  Recurses for each
   additional `template <...>' header so that nested headers all apply
   to the single declaration that follows.  */

static void
cp_parser_template_declaration_after_export (cp_parser* parser, bool member_p)
{
  tree decl = NULL_TREE;
  VEC (deferred_access_check,gc) *checks;
  tree parameter_list;
  bool friend_p = false;
  bool need_lang_pop;
  cp_token *token;

  /* Look for the `template' keyword.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (!cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE))
    return;

  /* And the `<'.  */
  if (!cp_parser_require (parser, CPP_LESS, RT_LESS))
    return;
  if (at_class_scope_p () && current_function_decl)
    {
      /* 14.5.2.2 [temp.mem]

	 A local class shall not have member templates.  */
      error_at (token->location,
		"invalid declaration of member template in local class");
      cp_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  /* [temp]

     A template ... shall not have C linkage.  */
  if (current_lang_name == lang_name_c)
    {
      error_at (token->location, "template with C linkage");
      /* Give it C++ linkage to avoid confusing other parts of the
	 front end.  */
      push_lang_context (lang_name_cplusplus);
      need_lang_pop = true;
    }
  else
    need_lang_pop = false;

  /* We cannot perform access checks on the template parameter
     declarations until we know what is being declared, just as we
     cannot check the decl-specifier list.  */
  push_deferring_access_checks (dk_deferred);

  /* If the next token is `>', then we have an invalid
     specialization.  Rather than complain about an invalid template
     parameter, issue an error message here.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER))
    {
      cp_parser_error (parser, "invalid explicit specialization");
      begin_specialization ();
      parameter_list = NULL_TREE;
    }
  else
    {
      /* Parse the template parameters.  */
      parameter_list = cp_parser_template_parameter_list (parser);
    }

  /* Get the deferred access checks from the parameter list.  These
     will be checked once we know what is being declared, as for a
     member template the checks must be performed in the scope of the
     class containing the member.  */
  checks = get_deferred_access_checks ();

  /* Look for the `>'.  */
  cp_parser_skip_to_end_of_template_parameter_list (parser);
  /* We just processed one more parameter list.  */
  ++parser->num_template_parameter_lists;
  /* If the next token is `template', there are more template
     parameters.  Handle them by recursing; the matching decrement
     below still runs when the recursive call returns.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer,
				      RID_TEMPLATE))
    cp_parser_template_declaration_after_export (parser, member_p);
  else if (cxx_dialect >= cxx0x
	   && cp_lexer_next_token_is_keyword (parser->lexer, RID_USING))
    /* C++11 alias-declaration template: template <...> using X = ...;  */
    decl = cp_parser_alias_declaration (parser);
  else
    {
      /* There are no access checks when parsing a template, as we do not
	 know if a specialization will be a friend.  */
      push_deferring_access_checks (dk_no_check);
      token = cp_lexer_peek_token (parser->lexer);
      decl = cp_parser_single_declaration (parser,
					   checks,
					   member_p,
					   /*explicit_specialization_p=*/false,
					   &friend_p);
      pop_deferring_access_checks ();

      /* If this is a member template declaration, let the front
	 end know.  */
      if (member_p && !friend_p && decl)
	{
	  if (TREE_CODE (decl) == TYPE_DECL)
	    cp_parser_check_access_in_redeclaration (decl, token->location);

	  decl = finish_member_template_decl (decl);
	}
      else if (friend_p && decl && TREE_CODE (decl) == TYPE_DECL)
	make_friend_class (current_class_type, TREE_TYPE (decl),
			   /*complain=*/true);
    }
  /* We are done with the current parameter list.  */
  --parser->num_template_parameter_lists;

  pop_deferring_access_checks ();

  /* Finish up.  */
  finish_template_decl (parameter_list);

  /* Check the template arguments for a literal operator template:
     it must have exactly one parameter, a non-type parameter pack of
     type char (i.e. <char...>).  */
  if (decl
      && (TREE_CODE (decl) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (decl))
      && UDLIT_OPER_P (DECL_NAME (decl)))
    {
      bool ok = true;
      if (parameter_list == NULL_TREE)
	ok = false;
      else
	{
	  int num_parms = TREE_VEC_LENGTH (parameter_list);
	  if (num_parms != 1)
	    ok = false;
	  else
	    {
	      tree parm_list = TREE_VEC_ELT (parameter_list, 0);
	      tree parm = INNERMOST_TEMPLATE_PARMS (parm_list);
	      if (TREE_TYPE (parm) != char_type_node
		  || !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
		ok = false;
	    }
	}
      if (!ok)
	error ("literal operator template %qD has invalid parameter list."
	       "  Expected non-type template argument pack <char...>",
	       decl);
    }
  /* Register member declarations.  */
  if (member_p && !friend_p && decl && !DECL_CLASS_TEMPLATE_P (decl))
    finish_member_declaration (decl);
  /* For the erroneous case of a template with C linkage, we pushed an
     implicit C++ linkage scope; exit that scope now.  */
  if (need_lang_pop)
    pop_lang_context ();
  /* If DECL is a function template, we must return to parse it later.
     (Even though there is no definition, there might be default
     arguments that need handling.)  */
  if (member_p && decl
      && (TREE_CODE (decl) == FUNCTION_DECL
	  || DECL_FUNCTION_TEMPLATE_P (decl)))
    VEC_safe_push (tree, gc, unparsed_funs_with_definitions, decl);
}
/* Perform the deferred access checks from a template-parameter-list.
   CHECKS is a TREE_LIST of access checks, as returned by
   get_deferred_access_checks.  The global processing_template_parmlist
   counter is incremented around the checks so that access control code
   can tell the checks originate from a template-parameter-list.  */

static void
cp_parser_perform_template_parameter_access_checks (VEC (deferred_access_check,gc)* checks)
{
  /* Record that we are inside a template parameter list while the
     deferred checks run.  */
  ++processing_template_parmlist;
  perform_access_checks (checks);
  --processing_template_parmlist;
}
/* Parse a `decl-specifier-seq [opt] init-declarator [opt] ;' or
   `function-definition' sequence.  If MEMBER_P is true, this declaration
   appears in a class scope.

   Returns the DECL for the declared entity.  If FRIEND_P is non-NULL,
   *FRIEND_P is set to TRUE iff the declaration is a friend.  */

static tree
cp_parser_single_declaration (cp_parser* parser,
			      VEC (deferred_access_check,gc)* checks,
			      bool member_p,
			      bool explicit_specialization_p,
			      bool* friend_p)
{
  int declares_class_or_enum;
  tree decl = NULL_TREE;
  cp_decl_specifier_seq decl_specifiers;
  bool function_definition_p = false;
  cp_token *decl_spec_token_start;

  /* This function is only used when processing a template
     declaration.  */
  gcc_assert (innermost_scope_kind () == sk_template_parms
	      || innermost_scope_kind () == sk_template_spec);

  /* Defer access checks until we know what is being declared.  */
  push_deferring_access_checks (dk_deferred);

  /* Try the `decl-specifier-seq [opt] init-declarator [opt]'
     alternative.  */
  decl_spec_token_start = cp_lexer_peek_token (parser->lexer);
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  if (friend_p)
    *friend_p = cp_parser_friend_p (&decl_specifiers);

  /* There are no template typedefs.  */
  if (decl_specifiers.specs[(int) ds_typedef])
    {
      error_at (decl_spec_token_start->location,
		"template declaration of %<typedef%>");
      decl = error_mark_node;
    }

  /* Gather up the access checks that occurred during the parse of the
     decl-specifier-seq.  */
  stop_deferring_access_checks ();

  /* Check for the declaration of a template class.  */
  if (declares_class_or_enum)
    {
      if (cp_parser_declares_only_class_p (parser))
	{
	  decl = shadow_tag (&decl_specifiers);

	  /* In this case:

	       struct C {
		 friend template <typename T> struct A<T>::B;
	       };

	     A<T>::B will be represented by a TYPENAME_TYPE, and
	     therefore not recognized by shadow_tag.  */
	  if (friend_p && *friend_p
	      && !decl
	      && decl_specifiers.type
	      && TYPE_P (decl_specifiers.type))
	    decl = decl_specifiers.type;

	  if (decl && decl != error_mark_node)
	    decl = TYPE_NAME (decl);
	  else
	    decl = error_mark_node;

	  /* Perform access checks for template parameters.  */
	  cp_parser_perform_template_parameter_access_checks (checks);
	}
    }

  /* Complain about missing 'typename' or other invalid type names.  */
  if (!decl_specifiers.any_type_specifiers_p
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    {
      /* cp_parser_parse_and_diagnose_invalid_type_name calls
	 cp_parser_skip_to_end_of_block_or_statement, so don't try to parse
	 the rest of this declaration.  */
      decl = error_mark_node;
      goto out;
    }

  /* If it's not a template class, try for a template function.  If
     the next token is a `;', then this declaration does not declare
     anything.  But, if there were errors in the decl-specifiers, then
     the error might well have come from an attempted class-specifier.
     In that case, there's no need to warn about a missing declarator.  */
  if (!decl
      && (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)
	  || decl_specifiers.type != error_mark_node))
    {
      decl = cp_parser_init_declarator (parser,
					&decl_specifiers,
					checks,
					/*function_definition_allowed_p=*/true,
					member_p,
					declares_class_or_enum,
					&function_definition_p,
					NULL);

      /* 7.1.1-1 [dcl.stc]

	 A storage-class-specifier shall not be specified in an explicit
	 specialization...  */
      if (decl
	  && explicit_specialization_p
	  && decl_specifiers.storage_class != sc_none)
	{
	  error_at (decl_spec_token_start->location,
		    "explicit template specialization cannot have a storage class");
	  decl = error_mark_node;
	}
    }

  /* Look for a trailing `;' after the declaration.  */
  if (!function_definition_p
      && (decl == error_mark_node
	  || !cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON)))
    cp_parser_skip_to_end_of_block_or_statement (parser);

 out:
  pop_deferring_access_checks ();

  /* Clear any current qualification; whatever comes next is the start
     of something new.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;

  return decl;
}
/* Parse a cast-expression that is not the operand of a unary "&".
   This is a convenience wrapper around cp_parser_cast_expression with
   ADDRESS_P and CAST_P both false and no pidk out-parameter.  */

static tree
cp_parser_simple_cast_expression (cp_parser *parser)
{
  tree expr;

  expr = cp_parser_cast_expression (parser, /*address_p=*/false,
				    /*cast_p=*/false, NULL);
  return expr;
}
/* Parse a functional cast to TYPE.  Returns an expression
   representing the cast.  A braced-init-list argument produces a
   compound literal instead of a functional cast proper.  */

static tree
cp_parser_functional_cast (cp_parser* parser, tree type)
{
  VEC(tree,gc) *arg_vec;
  tree args;
  tree result;
  bool non_constant;

  /* `T{...}' — C++0x list-initialization syntax.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      args = cp_parser_braced_list (parser, &non_constant);
      CONSTRUCTOR_IS_DIRECT_INIT (args) = 1;
      if (TREE_CODE (type) == TYPE_DECL)
	type = TREE_TYPE (type);
      return finish_compound_literal (type, args,
				      tf_warning_or_error);
    }

  /* Otherwise, a parenthesized (possibly empty) expression list.  */
  arg_vec = cp_parser_parenthesized_expression_list (parser, non_attr,
						     /*cast_p=*/true,
						     /*allow_expansion_p=*/true,
						     /*non_constant_p=*/NULL);
  if (arg_vec == NULL)
    args = error_mark_node;
  else
    {
      args = build_tree_list_vec (arg_vec);
      release_tree_vector (arg_vec);
    }

  result = build_functional_cast (type, args,
				  tf_warning_or_error);
  /* [expr.const]/1: In an integral constant expression "only type
     conversions to integral or enumeration type can be used".  */
  if (TREE_CODE (type) == TYPE_DECL)
    type = TREE_TYPE (type);
  if (result != error_mark_node
      && !cast_valid_in_integral_constant_expression_p (type)
      && cp_parser_non_integral_constant_expression (parser,
						     NIC_CONSTRUCTOR))
    return error_mark_node;

  return result;
}
/* Save the tokens that make up the body of a member function defined
   in a class-specifier.  The DECL_SPECIFIERS and DECLARATOR have
   already been parsed.  The ATTRIBUTES are any GNU "__attribute__"
   specifiers applied to the declaration.  Returns the FUNCTION_DECL
   for the member function.  The body tokens (including any
   mem-initializer-list and function-try-block handlers) are cached on
   the decl for late parsing once the class is complete.  */

static tree
cp_parser_save_member_function_body (cp_parser* parser,
				     cp_decl_specifier_seq *decl_specifiers,
				     cp_declarator *declarator,
				     tree attributes)
{
  cp_token *first;
  cp_token *last;
  tree fn;

  /* Create the FUNCTION_DECL.  */
  fn = grokmethod (decl_specifiers, declarator, attributes);
  /* If something went badly wrong, bail out now.  */
  if (fn == error_mark_node)
    {
      /* If there's a function-body, skip it.  */
      if (cp_parser_token_starts_function_definition_p
	  (cp_lexer_peek_token (parser->lexer)))
	cp_parser_skip_to_end_of_block_or_statement (parser);
      return error_mark_node;
    }

  /* Remember it, if there are default args to post process.  */
  cp_parser_save_default_args (parser, fn);

  /* Save away the tokens that make up the body of the
     function.  */
  first = parser->lexer->next_token;
  /* We can have braced-init-list mem-initializers before the fn body.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      cp_lexer_consume_token (parser->lexer);
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
	     && cp_lexer_next_token_is_not_keyword (parser->lexer, RID_TRY))
	{
	  /* cache_group will stop after an un-nested { } pair, too.  */
	  if (cp_parser_cache_group (parser, CPP_CLOSE_PAREN, /*depth=*/0))
	    break;

	  /* variadic mem-inits have ... after the ')'.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	    cp_lexer_consume_token (parser->lexer);
	}
    }
  /* Cache the body proper (an un-nested { } group).  */
  cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
  /* Handle function try blocks: cache each catch handler as well.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_CATCH))
    cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
  last = parser->lexer->next_token;

  /* Save away the inline definition; we will process it when the
     class is complete.  */
  DECL_PENDING_INLINE_INFO (fn) = cp_token_cache_new (first, last);
  DECL_PENDING_INLINE_P (fn) = 1;

  /* We need to know that this was defined in the class, so that
     friend templates are handled correctly.  */
  DECL_INITIALIZED_IN_CLASS_P (fn) = 1;

  /* Add FN to the queue of functions to be parsed later.  */
  VEC_safe_push (tree, gc, unparsed_funs_with_definitions, fn);

  return fn;
}
/* Save the tokens that make up the in-class initializer for a non-static
   data member, so they can be parsed once the enclosing class is
   complete.  Returns a DEFAULT_ARG holding the cached tokens.  */

static tree
cp_parser_save_nsdmi (cp_parser* parser)
{
  /* Reuse the default-argument token-caching machinery; the nsdmi
     flag tells it we are caching a member initializer instead.  */
  tree saved_tokens = cp_parser_cache_defarg (parser, /*nsdmi=*/true);
  return saved_tokens;
}
/* Parse a template-argument-list, as well as the trailing ">" (but
   not the opening "<").  See cp_parser_template_argument_list for the
   return value.  On exit, the parser's scope state, the
   greater-than-is-operator flag, and the unevaluated-operand context
   are all restored to their values on entry.  */

static tree
cp_parser_enclosed_template_argument_list (cp_parser* parser)
{
  tree arguments;
  tree saved_scope;
  tree saved_qualifying_scope;
  tree saved_object_scope;
  bool saved_greater_than_is_operator_p;
  int saved_unevaluated_operand;
  int saved_inhibit_evaluation_warnings;

  /* [temp.names]

     When parsing a template-id, the first non-nested `>' is taken as
     the end of the template-argument-list rather than a greater-than
     operator.  */
  saved_greater_than_is_operator_p
    = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = false;
  /* Parsing the argument list may modify SCOPE, so we save it
     here.  */
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;
  /* We need to evaluate the template arguments, even though this
     template-id may be nested within a "sizeof".  */
  saved_unevaluated_operand = cp_unevaluated_operand;
  cp_unevaluated_operand = 0;
  saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
  c_inhibit_evaluation_warnings = 0;
  /* Parse the template-argument-list itself.  An immediate `>' (or
     `>>', treated as two `>'s) means the list is empty.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER)
      || cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    arguments = NULL_TREE;
  else
    arguments = cp_parser_template_argument_list (parser);
  /* Look for the `>' that ends the template-argument-list.  If we find
     a '>>' instead, it's probably just a typo.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    {
      if (cxx_dialect != cxx98)
	{
	  /* In C++0x, a `>>' in a template argument list or cast
	     expression is considered to be two separate `>'
	     tokens.  So, change the current token to a `>', but don't
	     consume it: it will be consumed later when the outer
	     template argument list (or cast expression) is parsed.
	     Note that this replacement of `>' for `>>' is necessary
	     even if we are parsing tentatively: in the tentative
	     case, after calling
	     cp_parser_enclosed_template_argument_list we will always
	     throw away all of the template arguments and the first
	     closing `>', either because the template argument list
	     was erroneous or because we are replacing those tokens
	     with a CPP_TEMPLATE_ID token.  The second `>' (which will
	     not have been thrown away) is needed either to close an
	     outer template argument list or to complete a new-style
	     cast.  */
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  token->type = CPP_GREATER;
	}
      else if (!saved_greater_than_is_operator_p)
	{
	  /* If we're in a nested template argument list, the '>>' has
	     to be a typo for '> >'.  We emit the error message, but we
	     continue parsing and we push a '>' as next token, so that
	     the argument list will be parsed correctly.  Note that the
	     global source location is still on the token before the
	     '>>', so we need to say explicitly where we want it.  */
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  error_at (token->location, "%<>>%> should be %<> >%> "
		    "within a nested template argument list");

	  token->type = CPP_GREATER;
	}
      else
	{
	  /* If this is not a nested template argument list, the '>>'
	     is a typo for '>'.  Emit an error message and continue.
	     Same deal about the token location, but here we can get it
	     right by consuming the '>>' before issuing the diagnostic.  */
	  cp_token *token = cp_lexer_consume_token (parser->lexer);
	  error_at (token->location,
		    "spurious %<>>%>, use %<>%> to terminate "
		    "a template argument list");
	}
    }
  else
    /* Not at `>>'; require the `>' (diagnosing and recovering if it
       is missing).  */
    cp_parser_skip_to_end_of_template_parameter_list (parser);
  /* The `>' token might be a greater-than operator again now.  */
  parser->greater_than_is_operator_p
    = saved_greater_than_is_operator_p;
  /* Restore the SAVED_SCOPE.  */
  parser->scope = saved_scope;
  parser->qualifying_scope = saved_qualifying_scope;
  parser->object_scope = saved_object_scope;
  cp_unevaluated_operand = saved_unevaluated_operand;
  c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;

  return arguments;
}
/* MEMBER_FUNCTION is a member function, or a friend.  If default
   arguments, or the body of the function have not yet been parsed,
   parse them now.  */

static void
cp_parser_late_parsing_for_member (cp_parser* parser, tree member_function)
{
  timevar_push (TV_PARSE_INMETH);

  /* If this member is a template, get the underlying
     FUNCTION_DECL.  */
  if (DECL_FUNCTION_TEMPLATE_P (member_function))
    member_function = DECL_TEMPLATE_RESULT (member_function);

  /* There should not be any class definitions in progress at this
     point; the bodies of members are only parsed outside of all class
     definitions.  */
  gcc_assert (parser->num_classes_being_defined == 0);
  /* While we're parsing the member functions we might encounter more
     classes.  We want to handle them right away, but we don't want
     them getting mixed up with functions that are currently in the
     queue.  */
  push_unparsed_function_queues (parser);

  /* Make sure that any template parameters are in scope.  */
  maybe_begin_member_template_processing (member_function);

  /* If the body of the function has not yet been parsed, parse it
     now.  */
  if (DECL_PENDING_INLINE_P (member_function))
    {
      tree function_scope;
      cp_token_cache *tokens;

      /* The function is no longer pending; we are processing it.  */
      tokens = DECL_PENDING_INLINE_INFO (member_function);
      DECL_PENDING_INLINE_INFO (member_function) = NULL;
      DECL_PENDING_INLINE_P (member_function) = 0;

      /* If this is a local class, enter the scope of the containing
	 function.  */
      function_scope = current_function_decl;
      if (function_scope)
	push_function_context ();

      /* Push the body of the function onto the lexer stack.  */
      cp_parser_push_lexer_for_tokens (parser, tokens);

      /* Let the front end know that we are going to be defining this
	 function.  */
      start_preparsed_function (member_function, NULL_TREE,
				SF_PRE_PARSED | SF_INCLASS_INLINE);

      /* Don't do access checking if it is a templated function.  */
      if (processing_template_decl)
	push_deferring_access_checks (dk_no_check);

      /* Now, parse the body of the function.  */
      cp_parser_function_definition_after_declarator (parser,
						      /*inline_p=*/true);

      if (processing_template_decl)
	pop_deferring_access_checks ();

      /* Leave the scope of the containing function.  */
      if (function_scope)
	pop_function_context ();
      cp_parser_pop_lexer (parser);
    }

  /* Remove any template parameters from the symbol table.  */
  maybe_end_member_template_processing ();

  /* Restore the queue.  */
  pop_unparsed_function_queues (parser);
  timevar_pop (TV_PARSE_INMETH);
}
/* If DECL contains any default args, remember it on the unparsed
   functions queue so the default arguments can be parsed later,
   in the proper scope.  */

static void
cp_parser_save_default_args (cp_parser* parser, tree decl)
{
  tree arg_type = TYPE_ARG_TYPES (TREE_TYPE (decl));

  /* Walk the parameter-type list; TREE_PURPOSE holds the (still
     unparsed) default argument, if there is one.  */
  while (arg_type)
    {
      if (TREE_PURPOSE (arg_type))
	{
	  /* One queue entry per function is enough; record the
	     function together with its class and stop looking.  */
	  cp_default_arg_entry *entry
	    = VEC_safe_push (cp_default_arg_entry, gc,
			     unparsed_funs_with_default_args, NULL);
	  entry->class_type = current_class_type;
	  entry->decl = decl;
	  break;
	}
      arg_type = TREE_CHAIN (arg_type);
    }
}
/* DEFAULT_ARG contains the saved tokens for the initializer of DECL,
   which is either a FIELD_DECL or PARM_DECL.  Parse it and return
   the result.  For a PARM_DECL, PARMTYPE is the corresponding type
   from the parameter-type-list.  The saved tokens are pushed onto the
   parser's lexer stack and popped again before returning.  */

static tree
cp_parser_late_parse_one_default_arg (cp_parser *parser, tree decl,
				      tree default_arg, tree parmtype)
{
  cp_token_cache *tokens;
  tree parsed_arg;
  bool dummy;

  if (default_arg == error_mark_node)
    return error_mark_node;

  /* Push the saved tokens for the default argument onto the parser's
     lexer stack.  */
  tokens = DEFARG_TOKENS (default_arg);
  cp_parser_push_lexer_for_tokens (parser, tokens);

  start_lambda_scope (decl);

  /* Parse the default argument.  */
  parsed_arg = cp_parser_initializer (parser, &dummy, &dummy);
  if (BRACE_ENCLOSED_INITIALIZER_P (parsed_arg))
    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);

  finish_lambda_scope ();

  if (!processing_template_decl)
    {
      /* In a non-template class, check conversions now.  In a template,
	 we'll wait and instantiate these as needed.  */
      if (TREE_CODE (decl) == PARM_DECL)
	parsed_arg = check_default_argument (parmtype, parsed_arg);
      else
	{
	  /* A FIELD_DECL (nsdmi): digest the initializer against the
	     field's type.  Direct-init braced lists use LOOKUP_NORMAL.  */
	  int flags = LOOKUP_IMPLICIT;
	  if (BRACE_ENCLOSED_INITIALIZER_P (parsed_arg)
	      && CONSTRUCTOR_IS_DIRECT_INIT (parsed_arg))
	    flags = LOOKUP_NORMAL;
	  parsed_arg = digest_init_flags (TREE_TYPE (decl), parsed_arg, flags);
	}
    }

  /* If the token stream has not been completely used up, then
     there was extra junk after the end of the default
     argument.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_EOF))
    {
      if (TREE_CODE (decl) == PARM_DECL)
	cp_parser_error (parser, "expected %<,%>");
      else
	cp_parser_error (parser, "expected %<;%>");
    }

  /* Revert to the main lexer.  */
  cp_parser_pop_lexer (parser);

  return parsed_arg;
}
/* FIELD is a non-static data member with an initializer which we saved
   for later; parse it now and install the result as the field's
   DECL_INITIAL.  */

static void
cp_parser_late_parsing_nsdmi (cp_parser *parser, tree field)
{
  tree parsed_init;

  /* Use a fresh unparsed-functions queue while parsing the
     initializer, so anything queued during the parse does not mix
     with what is already pending.  */
  push_unparsed_function_queues (parser);

  parsed_init
    = cp_parser_late_parse_one_default_arg (parser, field,
					    DECL_INITIAL (field),
					    NULL_TREE);

  pop_unparsed_function_queues (parser);

  DECL_INITIAL (field) = parsed_init;
}
/* FN is a FUNCTION_DECL which may contains a parameter with an
   unparsed DEFAULT_ARG.  Parse the default args now.  This function
   assumes that the current scope is the scope in which the default
   argument should be processed.  */

static void
cp_parser_late_parsing_default_args (cp_parser *parser, tree fn)
{
  bool saved_local_variables_forbidden_p;
  tree parm, parmdecl;

  /* While we're parsing the default args, we might (due to the
     statement expression extension) encounter more classes.  We want
     to handle them right away, but we don't want them getting mixed
     up with default args that are currently in the queue.  */
  push_unparsed_function_queues (parser);

  /* Local variable names (and the `this' keyword) may not appear
     in a default argument.  */
  saved_local_variables_forbidden_p = parser->local_variables_forbidden_p;
  parser->local_variables_forbidden_p = true;

  push_defarg_context (fn);

  /* Walk the parameter-type list and the PARM_DECL chain in step;
     TREE_PURPOSE of each list node holds the default argument.  */
  for (parm = TYPE_ARG_TYPES (TREE_TYPE (fn)),
	 parmdecl = DECL_ARGUMENTS (fn);
       parm && parm != void_list_node;
       parm = TREE_CHAIN (parm),
	 parmdecl = DECL_CHAIN (parmdecl))
    {
      tree default_arg = TREE_PURPOSE (parm);
      tree parsed_arg;
      VEC(tree,gc) *insts;
      tree copy;
      unsigned ix;

      if (!default_arg)
	continue;

      if (TREE_CODE (default_arg) != DEFAULT_ARG)
	/* This can happen for a friend declaration for a function
	   already declared with default arguments.  */
	continue;

      parsed_arg
	= cp_parser_late_parse_one_default_arg (parser, parmdecl,
						default_arg,
						TREE_VALUE (parm));
      if (parsed_arg == error_mark_node)
	{
	  continue;
	}

      TREE_PURPOSE (parm) = parsed_arg;

      /* Update any instantiations we've already created.  */
      for (insts = DEFARG_INSTANTIATIONS (default_arg), ix = 0;
	   VEC_iterate (tree, insts, ix, copy); ix++)
	TREE_PURPOSE (copy) = parsed_arg;
    }

  pop_defarg_context ();

  /* Make sure no default arg is missing.  */
  check_default_args (fn);

  /* Restore the state of local_variables_forbidden_p.  */
  parser->local_variables_forbidden_p = saved_local_variables_forbidden_p;

  /* Restore the queue.  */
  pop_unparsed_function_queues (parser);
}
/* Parse the operand of `sizeof' (or a similar operator).  Returns
   either a TYPE or an expression, depending on the form of the
   input.  The KEYWORD indicates which kind of expression we have
   encountered.  The operand is parsed in an unevaluated context, and
   a leading `...' (for `sizeof...') yields a pack expansion.  */

static tree
cp_parser_sizeof_operand (cp_parser* parser, enum rid keyword)
{
  tree expr = NULL_TREE;
  const char *saved_message;
  char *tmp;
  bool saved_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;
  bool pack_expansion_p = false;

  /* Types cannot be defined in a `sizeof' expression.  Save away the
     old message.  */
  saved_message = parser->type_definition_forbidden_message;
  /* And create the new one.  */
  tmp = concat ("types may not be defined in %<",
		IDENTIFIER_POINTER (ridpointers[keyword]),
		"%> expressions", NULL);
  parser->type_definition_forbidden_message = tmp;

  /* The restrictions on constant-expressions do not apply inside
     sizeof expressions.  */
  saved_integral_constant_expression_p
    = parser->integral_constant_expression_p;
  saved_non_integral_constant_expression_p
    = parser->non_integral_constant_expression_p;
  parser->integral_constant_expression_p = false;

  /* If it's a `...', then we are computing the length of a parameter
     pack.  */
  if (keyword == RID_SIZEOF
      && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      /* Consume the `...'.  */
      cp_lexer_consume_token (parser->lexer);
      maybe_warn_variadic_templates ();

      /* Note that this is an expansion.  */
      pack_expansion_p = true;
    }

  /* Do not actually evaluate the expression.  */
  ++cp_unevaluated_operand;
  ++c_inhibit_evaluation_warnings;
  /* If it's a `(', then we might be looking at the type-id
     construction.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type;
      bool saved_in_type_id_in_expr_p;

      /* We can't be sure yet whether we're looking at a type-id or an
	 expression.  */
      cp_parser_parse_tentatively (parser);
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the type-id.  */
      saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
      parser->in_type_id_in_expr_p = true;
      type = cp_parser_type_id (parser);
      parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
      /* Now, look for the trailing `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      /* If all went well, then we're done.  */
      if (cp_parser_parse_definitely (parser))
	{
	  cp_decl_specifier_seq decl_specs;

	  /* Build a trivial decl-specifier-seq.  */
	  clear_decl_specs (&decl_specs);
	  decl_specs.type = type;

	  /* Call grokdeclarator to figure out what type this is.  */
	  expr = grokdeclarator (NULL,
				 &decl_specs,
				 TYPENAME,
				 /*initialized=*/0,
				 /*attrlist=*/NULL);
	}
    }

  /* If the type-id production did not work out, then we must be
     looking at the unary-expression production.  */
  if (!expr)
    expr = cp_parser_unary_expression (parser, /*address_p=*/false,
				       /*cast_p=*/false, NULL);

  if (pack_expansion_p)
    /* Build a pack expansion.  */
    expr = make_pack_expansion (expr);

  /* Go back to evaluating expressions.  */
  --cp_unevaluated_operand;
  --c_inhibit_evaluation_warnings;

  /* Free the message we created.  */
  free (tmp);
  /* And restore the old one.  */
  parser->type_definition_forbidden_message = saved_message;
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;

  return expr;
}
/* If the current declaration has no declarator, return true.  */

static bool
cp_parser_declares_only_class_p (cp_parser *parser)
{
  /* With no declarator, the decl-specifier-seq is followed directly
     by either a `;' (end of declaration) or a `,'.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    return true;
  return cp_lexer_next_token_is (parser->lexer, CPP_COMMA);
}
/* Update the DECL_SPECS to reflect the storage class indicated by
   KEYWORD, diagnosing misuse at LOCATION.  */

static void
cp_parser_set_storage_class (cp_parser *parser,
			     cp_decl_specifier_seq *decl_specs,
			     enum rid keyword,
			     location_t location)
{
  cp_storage_class sc = sc_none;

  /* Storage classes may not appear in an unbraced linkage
     specification.  */
  if (parser->in_unbraced_linkage_specification_p)
    {
      error_at (location, "invalid use of %qD in linkage specification",
		ridpointers[keyword]);
      return;
    }

  /* A second storage class is a conflict; record it and let later
     processing diagnose it.  */
  if (decl_specs->storage_class != sc_none)
    {
      decl_specs->conflicting_specifiers_p = true;
      return;
    }

  /* Diagnose `__thread' appearing before `extern' or `static', and
     drop the `__thread' so only one error is emitted.  */
  if ((keyword == RID_EXTERN || keyword == RID_STATIC)
      && decl_specs->specs[(int) ds_thread])
    {
      error_at (location, "%<__thread%> before %qD", ridpointers[keyword]);
      decl_specs->specs[(int) ds_thread] = 0;
    }

  /* Map the keyword to the corresponding storage class.  */
  switch (keyword)
    {
    case RID_AUTO:     sc = sc_auto;     break;
    case RID_REGISTER: sc = sc_register; break;
    case RID_STATIC:   sc = sc_static;   break;
    case RID_EXTERN:   sc = sc_extern;   break;
    case RID_MUTABLE:  sc = sc_mutable;  break;
    default:
      gcc_unreachable ();
    }
  decl_specs->storage_class = sc;

  /* A storage class specifier cannot be applied alongside a typedef
     specifier.  If there is a typedef specifier present then set
     conflicting_specifiers_p which will trigger an error later
     on in grokdeclarator.  */
  if (decl_specs->specs[(int) ds_typedef])
    decl_specs->conflicting_specifiers_p = true;
}
/* Update the DECL_SPECS to reflect the TYPE_SPEC.  If TYPE_DEFINITION_P
   is true, the type is a class or enum definition.  LOCATION is the
   source location of the type specifier.  Multiple type specifiers set
   multiple_types_p rather than overwriting the first.  */

static void
cp_parser_set_decl_spec_type (cp_decl_specifier_seq *decl_specs,
			      tree type_spec,
			      location_t location,
			      bool type_definition_p)
{
  decl_specs->any_specifiers_p = true;

  /* If the user tries to redeclare bool, char16_t, char32_t, or wchar_t
     (with, for example, in "typedef int wchar_t;") we remember that
     this is what happened.  In system headers, we ignore these
     declarations so that G++ can work with system headers that are not
     C++-safe.  */
  if (decl_specs->specs[(int) ds_typedef]
      && !type_definition_p
      && (type_spec == boolean_type_node
	  || type_spec == char16_type_node
	  || type_spec == char32_type_node
	  || type_spec == wchar_type_node)
      && (decl_specs->type
	  || decl_specs->specs[(int) ds_long]
	  || decl_specs->specs[(int) ds_short]
	  || decl_specs->specs[(int) ds_unsigned]
	  || decl_specs->specs[(int) ds_signed]))
    {
      decl_specs->redefined_builtin_type = type_spec;
      /* Only remember the built-in type as the declared type if no
	 other type has been seen yet.  */
      if (!decl_specs->type)
	{
	  decl_specs->type = type_spec;
	  decl_specs->type_definition_p = false;
	  decl_specs->type_location = location;
	}
    }
  else if (decl_specs->type)
    /* A type was already recorded: flag the duplicate.  */
    decl_specs->multiple_types_p = true;
  else
    {
      decl_specs->type = type_spec;
      decl_specs->type_definition_p = type_definition_p;
      decl_specs->redefined_builtin_type = NULL_TREE;
      decl_specs->type_location = location;
    }
}
/* DECL_SPECIFIERS is the representation of a decl-specifier-seq.
Returns TRUE iff `friend' appears among the DECL_SPECIFIERS. */
static bool
cp_parser_friend_p (const cp_decl_specifier_seq *decl_specifiers)
{
return decl_specifiers->specs[(int) ds_friend] != 0;
}
/* Issue an error message indicating that TOKEN_DESC was expected.
   If KEYWORD is true, it indicates this function is called by
   cp_parser_require_keyword and the required token can only be
   an indicated keyword.  Keyword-style tokens are handled first so
   they are diagnosed the same way regardless of KEYWORD; punctuation
   tokens are only valid when KEYWORD is false.  */

static void
cp_parser_required_error (cp_parser *parser,
			  required_token token_desc,
			  bool keyword)
{
  switch (token_desc)
    {
      case RT_NEW:
	cp_parser_error (parser, "expected %<new%>");
	return;
      case RT_DELETE:
	cp_parser_error (parser, "expected %<delete%>");
	return;
      case RT_RETURN:
	cp_parser_error (parser, "expected %<return%>");
	return;
      case RT_WHILE:
	cp_parser_error (parser, "expected %<while%>");
	return;
      case RT_EXTERN:
	cp_parser_error (parser, "expected %<extern%>");
	return;
      case RT_STATIC_ASSERT:
	cp_parser_error (parser, "expected %<static_assert%>");
	return;
      case RT_DECLTYPE:
	cp_parser_error (parser, "expected %<decltype%>");
	return;
      case RT_OPERATOR:
	cp_parser_error (parser, "expected %<operator%>");
	return;
      case RT_CLASS:
	cp_parser_error (parser, "expected %<class%>");
	return;
      case RT_TEMPLATE:
	cp_parser_error (parser, "expected %<template%>");
	return;
      case RT_NAMESPACE:
	cp_parser_error (parser, "expected %<namespace%>");
	return;
      case RT_USING:
	cp_parser_error (parser, "expected %<using%>");
	return;
      case RT_ASM:
	cp_parser_error (parser, "expected %<asm%>");
	return;
      case RT_TRY:
	cp_parser_error (parser, "expected %<try%>");
	return;
      case RT_CATCH:
	cp_parser_error (parser, "expected %<catch%>");
	return;
      case RT_THROW:
	cp_parser_error (parser, "expected %<throw%>");
	return;
      case RT_LABEL:
	cp_parser_error (parser, "expected %<__label__%>");
	return;
      case RT_AT_TRY:
	cp_parser_error (parser, "expected %<@try%>");
	return;
      case RT_AT_SYNCHRONIZED:
	cp_parser_error (parser, "expected %<@synchronized%>");
	return;
      case RT_AT_THROW:
	cp_parser_error (parser, "expected %<@throw%>");
	return;
      case RT_TRANSACTION_ATOMIC:
	cp_parser_error (parser, "expected %<__transaction_atomic%>");
	return;
      case RT_TRANSACTION_RELAXED:
	cp_parser_error (parser, "expected %<__transaction_relaxed%>");
	return;
      default:
	break;
    }
  /* Punctuation and other non-keyword tokens; reaching here with
     KEYWORD true is a caller bug (see gcc_unreachable below).  */
  if (!keyword)
    {
      switch (token_desc)
	{
	  case RT_SEMICOLON:
	    cp_parser_error (parser, "expected %<;%>");
	    return;
	  case RT_OPEN_PAREN:
	    cp_parser_error (parser, "expected %<(%>");
	    return;
	  case RT_CLOSE_BRACE:
	    cp_parser_error (parser, "expected %<}%>");
	    return;
	  case RT_OPEN_BRACE:
	    cp_parser_error (parser, "expected %<{%>");
	    return;
	  case RT_CLOSE_SQUARE:
	    cp_parser_error (parser, "expected %<]%>");
	    return;
	  case RT_OPEN_SQUARE:
	    cp_parser_error (parser, "expected %<[%>");
	    return;
	  case RT_COMMA:
	    cp_parser_error (parser, "expected %<,%>");
	    return;
	  case RT_SCOPE:
	    cp_parser_error (parser, "expected %<::%>");
	    return;
	  case RT_LESS:
	    cp_parser_error (parser, "expected %<<%>");
	    return;
	  case RT_GREATER:
	    cp_parser_error (parser, "expected %<>%>");
	    return;
	  case RT_EQ:
	    cp_parser_error (parser, "expected %<=%>");
	    return;
	  case RT_ELLIPSIS:
	    cp_parser_error (parser, "expected %<...%>");
	    return;
	  case RT_MULT:
	    cp_parser_error (parser, "expected %<*%>");
	    return;
	  case RT_COMPL:
	    cp_parser_error (parser, "expected %<~%>");
	    return;
	  case RT_COLON:
	    cp_parser_error (parser, "expected %<:%>");
	    return;
	  case RT_COLON_SCOPE:
	    cp_parser_error (parser, "expected %<:%> or %<::%>");
	    return;
	  case RT_CLOSE_PAREN:
	    cp_parser_error (parser, "expected %<)%>");
	    return;
	  case RT_COMMA_CLOSE_PAREN:
	    cp_parser_error (parser, "expected %<,%> or %<)%>");
	    return;
	  case RT_PRAGMA_EOL:
	    cp_parser_error (parser, "expected end of line");
	    return;
	  case RT_NAME:
	    cp_parser_error (parser, "expected identifier");
	    return;
	  case RT_SELECT:
	    cp_parser_error (parser, "expected selection-statement");
	    return;
	  case RT_INTERATION:
	    /* Note: the enumerator is spelled RT_INTERATION upstream.  */
	    cp_parser_error (parser, "expected iteration-statement");
	    return;
	  case RT_JUMP:
	    cp_parser_error (parser, "expected jump-statement");
	    return;
	  case RT_CLASS_KEY:
	    cp_parser_error (parser, "expected class-key");
	    return;
	  case RT_CLASS_TYPENAME_TEMPLATE:
	    cp_parser_error (parser,
	  	 "expected %<class%>, %<typename%>, or %<template%>");
	    return;
	  default:
	    gcc_unreachable ();
	}
    }
  else
    gcc_unreachable ();
}
/* If the next token is of the indicated TYPE, consume it.  Otherwise,
   issue an error message indicating that TOKEN_DESC was expected.
   Returns the token consumed, if the token had the appropriate type.
   Otherwise, returns NULL.  */

static cp_token *
cp_parser_require (cp_parser* parser,
		   enum cpp_ttype type,
		   required_token token_desc)
{
  /* Wrong token: report (unless parsing tentatively) and fail.  */
  if (!cp_lexer_next_token_is (parser->lexer, type))
    {
      /* Output the MESSAGE -- unless we're parsing tentatively.  */
      if (!cp_parser_simulate_error (parser))
	cp_parser_required_error (parser, token_desc, /*keyword=*/false);
      return NULL;
    }

  return cp_lexer_consume_token (parser->lexer);
}
/* An error message is produced if the next token is not '>'.
   All further tokens are skipped until the desired token is
   found or '{', '}', ';' or an unbalanced ')' or ']'.  */

static void
cp_parser_skip_to_end_of_template_parameter_list (cp_parser* parser)
{
  /* Current level of '< ... >'.  */
  unsigned level = 0;
  /* Ignore '<' and '>' nested inside '( ... )' or '[ ... ]'.  */
  unsigned nesting_depth = 0;

  /* If the next token is the expected `>', cp_parser_require consumes
     it and we are done.  Otherwise it has issued the error message,
     and we go on to skip tokens.  */
  if (cp_parser_require (parser, CPP_GREATER, RT_GREATER))
    return;

  /* Skip tokens until the desired token is found.  */
  while (true)
    {
      /* Peek at the next token.  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
	{
	case CPP_LESS:
	  if (!nesting_depth)
	    ++level;
	  break;

	case CPP_RSHIFT:
	  if (cxx_dialect == cxx98)
	    /* C++0x views the `>>' operator as two `>' tokens, but
	       C++98 does not.  */
	    break;
	  else if (!nesting_depth && level-- == 0)
	    {
	      /* We've hit a `>>' where the first `>' closes the
		 template argument list, and the second `>' is
		 spurious.  Just consume the `>>' and stop; we've
		 already produced at least one error.  */
	      cp_lexer_consume_token (parser->lexer);
	      return;
	    }
	  /* Fall through for C++0x, so we handle the second `>' in
	     the `>>'.  */

	case CPP_GREATER:
	  if (!nesting_depth && level-- == 0)
	    {
	      /* We've reached the token we want, consume it and stop.  */
	      cp_lexer_consume_token (parser->lexer);
	      return;
	    }
	  break;

	case CPP_OPEN_PAREN:
	case CPP_OPEN_SQUARE:
	  ++nesting_depth;
	  break;

	case CPP_CLOSE_PAREN:
	case CPP_CLOSE_SQUARE:
	  if (nesting_depth-- == 0)
	    return;
	  break;

	case CPP_EOF:
	case CPP_PRAGMA_EOL:
	case CPP_SEMICOLON:
	case CPP_OPEN_BRACE:
	case CPP_CLOSE_BRACE:
	  /* The '>' was probably forgotten, don't look further.  */
	  return;

	default:
	  break;
	}

      /* Consume this token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* If the next token is the indicated keyword, consume it.  Otherwise,
   issue an error message indicating that TOKEN_DESC was expected.
   Returns the token consumed, if the token had the appropriate type.
   Otherwise, returns NULL.  */

static cp_token *
cp_parser_require_keyword (cp_parser* parser,
			   enum rid keyword,
			   required_token token_desc)
{
  cp_token *tok = cp_parser_require (parser, CPP_KEYWORD, token_desc);

  /* cp_parser_require already diagnosed a non-keyword token.  */
  if (!tok)
    return NULL;

  /* A keyword, but the wrong one: report it ourselves.  */
  if (tok->keyword != keyword)
    {
      cp_parser_required_error (parser, token_desc, /*keyword=*/true);
      return NULL;
    }

  return tok;
}
/* Returns TRUE iff TOKEN is a token that can begin the body of a
   function-definition.  */

static bool
cp_parser_token_starts_function_definition_p (cp_token* token)
{
  /* An ordinary function-body begins with an `{'; a ctor-initializer
     begins with a `:'.  */
  if (token->type == CPP_OPEN_BRACE || token->type == CPP_COLON)
    return true;

  switch (token->keyword)
    {
    /* A function-try-block begins with `try'.  */
    case RID_TRY:
    /* A function-transaction-block begins with `__transaction_atomic'
       or `__transaction_relaxed'.  */
    case RID_TRANSACTION_ATOMIC:
    case RID_TRANSACTION_RELAXED:
    /* The named return value extension begins with `return'.  */
    case RID_RETURN:
      return true;
    default:
      return false;
    }
}
/* Returns TRUE iff the next token is the ":" or "{" beginning a class
   definition.  */

static bool
cp_parser_next_token_starts_class_definition_p (cp_parser *parser)
{
  cp_token *tok = cp_lexer_peek_token (parser->lexer);
  if (tok->type == CPP_OPEN_BRACE)
    return true;
  return tok->type == CPP_COLON;
}
/* Returns TRUE iff the next token is the "," or ">" (or `>>', in
   C++0x) ending a template-argument.  */

static bool
cp_parser_next_token_ends_template_argument_p (cp_parser *parser)
{
  cp_token *tok = cp_lexer_peek_token (parser->lexer);

  switch (tok->type)
    {
    case CPP_COMMA:
    case CPP_GREATER:
    case CPP_ELLIPSIS:
      return true;
    case CPP_RSHIFT:
      /* `>>' only ends an argument when it can split into two `>'s,
	 which C++98 does not allow.  */
      return cxx_dialect != cxx98;
    default:
      return false;
    }
}
/* Returns TRUE iff the n-th token is a "<", or the n-th is a "[" and the
   (n+1)-th is a ":" (which is a possible digraph typo for "< ::").  */

static bool
cp_parser_nth_token_starts_template_argument_list_p (cp_parser * parser,
						     size_t n)
{
  cp_token *tok;
  cp_token *next;

  tok = cp_lexer_peek_nth_token (parser->lexer, n);
  if (tok->type == CPP_LESS)
    return true;

  /* Check for the sequence `<::' in the original code.  It would be lexed
     as `[:', where `[' is a digraph, and there is no whitespace before
     `:'.  */
  if (tok->type != CPP_OPEN_SQUARE || !(tok->flags & DIGRAPH))
    return false;

  next = cp_lexer_peek_nth_token (parser->lexer, n + 1);
  return next->type == CPP_COLON && !(next->flags & PREV_WHITE);
}
/* Returns the kind of tag indicated by TOKEN, if it is a class-key,
   or none_type otherwise.  */

static enum tag_types
cp_parser_token_is_class_key (cp_token* token)
{
  if (token->keyword == RID_CLASS)
    return class_type;
  if (token->keyword == RID_STRUCT)
    return record_type;
  if (token->keyword == RID_UNION)
    return union_type;
  /* Not a class-key at all.  */
  return none_type;
}
/* Issue an error message if the CLASS_KEY does not match the TYPE.  */

static void
cp_parser_check_class_key (enum tag_types class_key, tree type)
{
  const char *key_name;

  if (type == error_mark_node)
    return;
  /* Only a mismatch between "union-ness" of the key and of the type
     is diagnosed; struct/class are interchangeable here.  */
  if ((TREE_CODE (type) == UNION_TYPE) == (class_key == union_type))
    return;

  key_name = (class_key == union_type ? "union"
	      : class_key == record_type ? "struct" : "class");
  permerror (input_location, "%qs tag used in naming %q#T",
	     key_name, type);
  inform (DECL_SOURCE_LOCATION (TYPE_NAME (type)),
	  "%q#T was previously declared here", type);
}
/* Issue an error message if DECL is redeclared with different
   access than its original declaration [class.access.spec/3].
   This applies to nested classes and nested class templates.
   [class.mem/1].  */

static void
cp_parser_check_access_in_redeclaration (tree decl, location_t location)
{
  bool now_private;
  bool now_protected;

  if (!decl || !CLASS_TYPE_P (TREE_TYPE (decl)))
    return;

  now_private = (current_access_specifier == access_private_node);
  now_protected = (current_access_specifier == access_protected_node);
  if (TREE_PRIVATE (decl) != now_private
      || TREE_PROTECTED (decl) != now_protected)
    error_at (location, "%qD redeclared with different access", decl);
}
/* Look for the `template' keyword, as a syntactic disambiguator.
   Return TRUE iff it is present, in which case it will be
   consumed.  */

static bool
cp_parser_optional_template_keyword (cp_parser *parser)
{
  cp_token *token;

  if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    return false;

  /* The `template' keyword can only be used within templates;
     outside templates the parser can always figure out what is a
     template and what is not.  */
  if (processing_template_decl)
    {
      /* Consume the `template' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      return true;
    }

  token = cp_lexer_peek_token (parser->lexer);
  error_at (token->location,
	    "%<template%> (as a disambiguator) is only allowed "
	    "within templates");
  /* If this part of the token stream is rescanned, the same
     error message would be generated.  So, we purge the token
     from the stream.  */
  cp_lexer_purge_token (parser->lexer);
  return false;
}
/* The next token is a CPP_NESTED_NAME_SPECIFIER.  Consume the token,
   set PARSER->SCOPE, and perform other related actions.  */

static void
cp_parser_pre_parsed_nested_name_specifier (cp_parser *parser)
{
  int i;
  struct tree_check *check_value;
  deferred_access_check *chk;
  VEC (deferred_access_check,gc) *checks;

  /* Get the stored value.  The nested-name-specifier token caches its
     result in the token's tree_check_value field.  */
  check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
  /* Perform any access checks that were deferred.  */
  checks = check_value->checks;
  if (checks)
    {
      FOR_EACH_VEC_ELT (deferred_access_check, checks, i, chk)
	perform_or_defer_access_check (chk->binfo,
				       chk->decl,
				       chk->diag_decl);
    }
  /* Set the scope from the stored value.  */
  parser->scope = check_value->value;
  parser->qualifying_scope = check_value->qualifying_scope;
  /* The cached specifier carries no object scope; clear it.  */
  parser->object_scope = NULL_TREE;
}
/* Consume tokens up through a non-nested END token.  Returns TRUE if we
   encounter the end of a block before what we were looking for.  */

static bool
cp_parser_cache_group (cp_parser *parser,
		       enum cpp_ttype end,
		       unsigned depth)
{
  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      /* Abort a parenthesized expression if we encounter a semicolon.  */
      if ((end == CPP_CLOSE_PAREN || depth == 0)
	  && token->type == CPP_SEMICOLON)
	return true;
      /* If we've reached the end of the file, stop.  A pragma's tokens
	 end at CPP_PRAGMA_EOL, so that token only terminates the scan
	 when we are not looking for it.  */
      if (token->type == CPP_EOF
	  || (end != CPP_PRAGMA_EOL
	      && token->type == CPP_PRAGMA_EOL))
	return true;
      if (token->type == CPP_CLOSE_BRACE && depth == 0)
	/* We've hit the end of an enclosing block, so there's been some
	   kind of syntax error.  */
	return true;

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
      /* See if it starts a new group.  Each opener recurses with the
	 matching closer as the new END.  */
      if (token->type == CPP_OPEN_BRACE)
	{
	  cp_parser_cache_group (parser, CPP_CLOSE_BRACE, depth + 1);
	  /* In theory this should probably check end == '}', but
	     cp_parser_save_member_function_body needs it to exit
	     after either '}' or ')' when called with ')'.  */
	  if (depth == 0)
	    return false;
	}
      else if (token->type == CPP_OPEN_PAREN)
	{
	  cp_parser_cache_group (parser, CPP_CLOSE_PAREN, depth + 1);
	  if (depth == 0 && end == CPP_CLOSE_PAREN)
	    return false;
	}
      else if (token->type == CPP_PRAGMA)
	cp_parser_cache_group (parser, CPP_PRAGMA_EOL, depth + 1);
      else if (token->type == end)
	return false;
    }
}
/* Like above, for caching a default argument or NSDMI.  Both of these are
   terminated by a non-nested comma, but it can be unclear whether or not a
   comma is nested in a template argument list unless we do more parsing.
   In order to handle this ambiguity, when we encounter a ',' after a '<'
   we try to parse what follows as a parameter-declaration-list (in the
   case of a default argument) or a member-declarator (in the case of an
   NSDMI).  If that succeeds, then we stop caching.  */

static tree
cp_parser_cache_defarg (cp_parser *parser, bool nsdmi)
{
  unsigned depth = 0;
  /* Count of '<' tokens that might have opened a template-argument
     list; '>' (and '>>') decrement it again.  */
  int maybe_template_id = 0;
  cp_token *first_token;
  cp_token *token;
  tree default_argument;

  /* Add tokens until we have processed the entire default
     argument.  We add the range [first_token, token).  */
  first_token = cp_lexer_peek_token (parser->lexer);
  if (first_token->type == CPP_OPEN_BRACE)
    {
      /* For list-initialization, this is straightforward.  */
      cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
      token = cp_lexer_peek_token (parser->lexer);
    }
  else while (true)
    {
      bool done = false;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* What we do depends on what token we have.  */
      switch (token->type)
	{
	/* In valid code, a default argument must be
	   immediately followed by a `,' `)', or `...'.  */
	case CPP_COMMA:
	  if (depth == 0 && maybe_template_id)
	    {
	      /* If we've seen a '<', we might be in a
		 template-argument-list.  Until Core issue 325 is
		 resolved, we don't know how this situation ought
		 to be handled, so try to DTRT.  We check whether
		 what comes after the comma is a valid parameter
		 declaration list.  If it is, then the comma ends
		 the default argument; otherwise the default
		 argument continues.  */
	      bool error = false;
	      tree t;

	      /* Set ITALP so cp_parser_parameter_declaration_list
		 doesn't decide to commit to this parse.  */
	      bool saved_italp = parser->in_template_argument_list_p;
	      parser->in_template_argument_list_p = true;

	      cp_parser_parse_tentatively (parser);
	      cp_lexer_consume_token (parser->lexer);

	      if (nsdmi)
		{
		  int ctor_dtor_or_conv_p;
		  cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					&ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					/*member_p=*/true);
		}
	      else
		{
		  /* Parse the trial parameter list in its own scope,
		     then unwind the bindings it created.  */
		  begin_scope (sk_function_parms, NULL_TREE);
		  cp_parser_parameter_declaration_list (parser, &error);
		  for (t = current_binding_level->names; t; t = DECL_CHAIN (t))
		    pop_binding (DECL_NAME (t), t);
		  leave_scope ();
		}
	      if (!cp_parser_error_occurred (parser) && !error)
		done = true;
	      /* The trial parse is always thrown away; the tokens are
		 re-parsed for real later.  */
	      cp_parser_abort_tentative_parse (parser);

	      parser->in_template_argument_list_p = saved_italp;
	      break;
	    }
	  /* Fall through: a ',' at depth 0 with no pending '<' ends
	     the default argument just like ')' or '...'.  */
	case CPP_CLOSE_PAREN:
	case CPP_ELLIPSIS:
	/* If we run into a non-nested `;', `}', or `]',
	   then the code is invalid -- but the default
	   argument is certainly over.  */
	case CPP_SEMICOLON:
	case CPP_CLOSE_BRACE:
	case CPP_CLOSE_SQUARE:
	  if (depth == 0)
	    done = true;
	  /* Update DEPTH, if necessary.  */
	  else if (token->type == CPP_CLOSE_PAREN
		   || token->type == CPP_CLOSE_BRACE
		   || token->type == CPP_CLOSE_SQUARE)
	    --depth;
	  break;

	case CPP_OPEN_PAREN:
	case CPP_OPEN_SQUARE:
	case CPP_OPEN_BRACE:
	  ++depth;
	  break;

	case CPP_LESS:
	  if (depth == 0)
	    /* This might be the comparison operator, or it might
	       start a template argument list.  */
	    ++maybe_template_id;
	  break;

	case CPP_RSHIFT:
	  if (cxx_dialect == cxx98)
	    break;
	  /* Fall through for C++0x, which treats the `>>'
	     operator like two `>' tokens in certain
	     cases.  */
	case CPP_GREATER:
	  if (depth == 0)
	    {
	      /* This might be an operator, or it might close a
		 template argument list.  But if a previous '<'
		 started a template argument list, this will have
		 closed it, so we can't be in one anymore.  */
	      maybe_template_id -= 1 + (token->type == CPP_RSHIFT);
	      if (maybe_template_id < 0)
		maybe_template_id = 0;
	    }
	  break;

	/* If we run out of tokens, issue an error message.  */
	case CPP_EOF:
	case CPP_PRAGMA_EOL:
	  error_at (token->location, "file ends in default argument");
	  done = true;
	  break;

	case CPP_NAME:
	case CPP_SCOPE:
	  /* In these cases, we should look for template-ids.
	     For example, if the default argument is
	     `X<int, double>()', we need to do name lookup to
	     figure out whether or not `X' is a template; if
	     so, the `,' does not end the default argument.
	     That is not yet done.  */
	  break;

	default:
	  break;
	}

      /* If we've reached the end, stop.  */
      if (done)
	break;

      /* Add the token to the token block.  */
      token = cp_lexer_consume_token (parser->lexer);
    }

  /* Create a DEFAULT_ARG to represent the unparsed default
     argument.  */
  default_argument = make_node (DEFAULT_ARG);
  DEFARG_TOKENS (default_argument)
    = cp_token_cache_new (first_token, token);
  DEFARG_INSTANTIATIONS (default_argument) = NULL;

  return default_argument;
}
/* Begin parsing tentatively.  We always save tokens while parsing
   tentatively so that if the tentative parsing fails we can restore the
   tokens.  */

static void
cp_parser_parse_tentatively (cp_parser* parser)
{
  /* Enter a new parsing context; its status records whether this
     tentative parse has erred or been committed.  */
  parser->context = cp_parser_context_new (parser->context);
  /* Begin saving tokens.  */
  cp_lexer_save_tokens (parser->lexer);
  /* In order to avoid repetitive access control error messages,
     access checks are queued up until we are no longer parsing
     tentatively.  */
  push_deferring_access_checks (dk_deferred);
}
/* Commit to the currently active tentative parse.  */

static void
cp_parser_commit_to_tentative_parse (cp_parser* parser)
{
  cp_parser_context *context;
  cp_lexer *lexer;

  /* Mark all of the levels as committed.  */
  lexer = parser->lexer;
  for (context = parser->context; context->next; context = context->next)
    {
      /* Once we meet a context that is already committed, every
	 outer one was committed earlier; stop.  */
      if (context->status == CP_PARSER_STATUS_KIND_COMMITTED)
	break;
      context->status = CP_PARSER_STATUS_KIND_COMMITTED;
      /* Walk up the lexer chain to the nearest lexer that is saving
	 tokens, and commit the tokens it has saved.  */
      while (!cp_lexer_saving_tokens (lexer))
	lexer = lexer->next;
      cp_lexer_commit_tokens (lexer);
    }
}
/* Abort the currently active tentative parse.  All consumed tokens
   will be rolled back, and no diagnostics will be issued.  */

static void
cp_parser_abort_tentative_parse (cp_parser* parser)
{
  /* Aborting a committed parse is only tolerated when errors have
     already been reported.  */
  gcc_assert (parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED
	      || errorcount > 0);
  /* Force the context into the error state so that ...  */
  cp_parser_simulate_error (parser);
  /* Now, pretend that we want to see if the construct was
     successfully parsed.  Since an error is recorded, this rolls the
     token stream back.  */
  cp_parser_parse_definitely (parser);
}
/* Stop parsing tentatively.  If a parse error has occurred, restore the
   token stream.  Otherwise, commit to the tokens we have consumed.
   Returns true if no error occurred; false otherwise.  */

static bool
cp_parser_parse_definitely (cp_parser* parser)
{
  bool error_occurred;
  cp_parser_context *context;

  /* Remember whether or not an error occurred, since we are about to
     destroy that information.  */
  error_occurred = cp_parser_error_occurred (parser);
  /* Remove the topmost context from the stack.  */
  context = parser->context;
  parser->context = context->next;
  /* If no parse errors occurred, commit to the tentative parse.  */
  if (!error_occurred)
    {
      /* Commit to the tokens read tentatively, unless that was
	 already done.  */
      if (context->status != CP_PARSER_STATUS_KIND_COMMITTED)
	cp_lexer_commit_tokens (parser->lexer);

      /* Keep the deferred access checks so the enclosing context can
	 perform them later.  */
      pop_to_parent_deferring_access_checks ();
    }
  /* Otherwise, if errors occurred, roll back our state so that things
     are just as they were before we began the tentative parse.  */
  else
    {
      cp_lexer_rollback_tokens (parser->lexer);
      /* Discard the access checks deferred during the failed parse.  */
      pop_deferring_access_checks ();
    }
  /* Add the context to the front of the free list.  */
  context->next = cp_parser_context_free_list;
  cp_parser_context_free_list = context;

  return !error_occurred;
}
/* Returns true if we are parsing tentatively and are not committed to
   this tentative parse.  */

static bool
cp_parser_uncommitted_to_tentative_parse_p (cp_parser* parser)
{
  if (!cp_parser_parsing_tentatively (parser))
    return false;
  return parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED;
}
/* Returns nonzero iff an error has occurred during the most recent
   tentative parse.  */

static bool
cp_parser_error_occurred (cp_parser* parser)
{
  if (!cp_parser_parsing_tentatively (parser))
    return false;
  return parser->context->status == CP_PARSER_STATUS_KIND_ERROR;
}
/* Returns nonzero if GNU extensions are allowed.  */

static bool
cp_parser_allow_gnu_extensions_p (cp_parser* parser)
{
  /* Simple accessor for the parser flag.  */
  return parser->allow_gnu_extensions_p;
}
/* Objective-C++ Productions */

/* Parse an Objective-C expression, which feeds into a primary-expression
   above.

   objc-expression:
     objc-message-expression
     objc-string-literal
     objc-encode-expression
     objc-protocol-expression
     objc-selector-expression

   Returns a tree representation of the expression.  */

static tree
cp_parser_objc_expression (cp_parser* parser)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->type)
    {
    case CPP_OPEN_SQUARE:
      /* '[' introduces a message send.  */
      return cp_parser_objc_message_expression (parser);

    case CPP_OBJC_STRING:
      /* An @"..." literal; consume it and build the string object.  */
      kwd = cp_lexer_consume_token (parser->lexer);
      return objc_build_string_object (kwd->u.value);

    case CPP_KEYWORD:
      switch (kwd->keyword)
	{
	case RID_AT_ENCODE:
	  return cp_parser_objc_encode_expression (parser);

	case RID_AT_PROTOCOL:
	  return cp_parser_objc_protocol_expression (parser);

	case RID_AT_SELECTOR:
	  return cp_parser_objc_selector_expression (parser);

	default:
	  break;
	}
      /* Unhandled @-keyword: deliberately fall through into the
	 outer default to issue the diagnostic below.  */

    default:
      error_at (kwd->location,
		"misplaced %<@%D%> Objective-C++ construct",
		kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }

  return error_mark_node;
}
/* Parse an Objective-C message expression.

   objc-message-expression:
     [ objc-message-receiver objc-message-args ]

   Returns a representation of an Objective-C message.  */

static tree
cp_parser_objc_message_expression (cp_parser* parser)
{
  tree receiver;
  tree args;

  /* Eat '['.  */
  cp_lexer_consume_token (parser->lexer);
  receiver = cp_parser_objc_message_receiver (parser);
  args = cp_parser_objc_message_args (parser);
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

  return objc_build_message_expr (receiver, args);
}
/* Parse an objc-message-receiver.

   objc-message-receiver:
     expression
     simple-type-specifier

   Returns a representation of the type or expression.  */

static tree
cp_parser_objc_message_receiver (cp_parser* parser)
{
  tree rcv;

  /* An Objective-C message receiver may be either (1) a type
     or (2) an expression.  Try the expression reading first.  */
  cp_parser_parse_tentatively (parser);
  rcv = cp_parser_expression (parser, false, NULL);

  if (!cp_parser_parse_definitely (parser))
    {
      /* Not an expression: re-parse as a type and build a class
	 reference from it.  */
      rcv = cp_parser_simple_type_specifier (parser,
					     /*decl_specs=*/NULL,
					     CP_PARSER_FLAGS_NONE);
      rcv = objc_get_class_reference (rcv);
    }

  return rcv;
}
/* Parse the arguments and selectors comprising an Objective-C message.

   objc-message-args:
     objc-selector
     objc-selector-args
     objc-selector-args , objc-comma-args

   objc-selector-args:
     objc-selector [opt] : assignment-expression
     objc-selector-args objc-selector [opt] : assignment-expression

   objc-comma-args:
     assignment-expression
     objc-comma-args , assignment-expression

   Returns a TREE_LIST, with TREE_PURPOSE containing a list of
   selector arguments and TREE_VALUE containing a list of comma
   arguments.  */

static tree
cp_parser_objc_message_args (cp_parser* parser)
{
  tree sel_args = NULL_TREE, addl_args = NULL_TREE;
  /* True only before the first selector component has been read; a
     unary selector is a selector with no ':' and no arguments.  */
  bool maybe_unary_selector_p = true;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON)
    {
      tree selector = NULL_TREE, arg;

      /* A bare ':' means this component has no selector name.  */
      if (token->type != CPP_COLON)
	selector = cp_parser_objc_selector (parser);

      /* Detect if we have a unary selector.  */
      if (maybe_unary_selector_p
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
	return build_tree_list (selector, NULL_TREE);

      maybe_unary_selector_p = false;
      cp_parser_require (parser, CPP_COLON, RT_COLON);
      arg = cp_parser_assignment_expression (parser, false, NULL);

      sel_args
	= chainon (sel_args,
		   build_tree_list (selector, arg));

      token = cp_lexer_peek_token (parser->lexer);
    }

  /* Handle non-selector arguments, if any.  */
  while (token->type == CPP_COMMA)
    {
      tree arg;

      cp_lexer_consume_token (parser->lexer);
      arg = cp_parser_assignment_expression (parser, false, NULL);

      addl_args
	= chainon (addl_args,
		   build_tree_list (NULL_TREE, arg));

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (sel_args == NULL_TREE && addl_args == NULL_TREE)
    {
      cp_parser_error (parser, "objective-c++ message argument(s) are expected");
      return build_tree_list (error_mark_node, error_mark_node);
    }

  return build_tree_list (sel_args, addl_args);
}
/* Parse an Objective-C encode expression.

   objc-encode-expression:
     @encode objc-typename

   Returns an encoded representation of the type argument.  */

static tree
cp_parser_objc_encode_expression (cp_parser* parser)
{
  tree type;
  cp_token *token;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@encode'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Remember the token so any diagnostic below points at the type
     argument, not at the ')'.  */
  token = cp_lexer_peek_token (parser->lexer);
  type = complete_type (cp_parser_type_id (parser));
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  if (!type)
    {
      error_at (token->location, 
		"%<@encode%> must specify a type as an argument");
      return error_mark_node;
    }

  /* This happens if we find @encode(T) (where T is a template
     typename or something dependent on a template typename) when
     parsing a template.  In that case, we can't compile it
     immediately, but we rather create an AT_ENCODE_EXPR which will
     need to be instantiated when the template is used.
  */
  if (dependent_type_p (type))
    {
      tree value = build_min (AT_ENCODE_EXPR, size_type_node, type);
      TREE_READONLY (value) = 1;
      return value;
    }

  return objc_build_encode_expr (type);
}
/* Parse an Objective-C @defs expression.  */

static tree
cp_parser_objc_defs_expression (cp_parser *parser)
{
  tree class_name;

  /* Eat '@defs'.  */
  cp_lexer_consume_token (parser->lexer);
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  class_name = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_get_class_ivars (class_name);
}
/* Parse an Objective-C protocol expression.

  objc-protocol-expression:
    @protocol ( identifier )

  Returns a representation of the protocol expression.  */

static tree
cp_parser_objc_protocol_expression (cp_parser* parser)
{
  tree protocol_name;

  /* Eat '@protocol'.  */
  cp_lexer_consume_token (parser->lexer);
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  protocol_name = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_build_protocol_expr (protocol_name);
}
/* Parse an Objective-C selector expression.

   objc-selector-expression:
     @selector ( objc-method-signature )

   objc-method-signature:
     objc-selector
     objc-selector-seq

   objc-selector-seq:
     objc-selector :
     objc-selector-seq objc-selector :

  Returns a representation of the method selector.  */

static tree
cp_parser_objc_selector_expression (cp_parser* parser)
{
  tree sel_seq = NULL_TREE;
  /* True only before the first component; a selector with no ':'
     at all is a unary selector.  */
  bool maybe_unary_selector_p = true;
  cp_token *token;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@selector'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON
	 || token->type == CPP_SCOPE)
    {
      tree selector = NULL_TREE;

      /* NOTE(review): the `|| token->type == CPP_SCOPE' clause below is
	 redundant -- a CPP_SCOPE token already satisfies
	 `!= CPP_COLON' -- but it is harmless.  */
      if (token->type != CPP_COLON
	  || token->type == CPP_SCOPE)
	selector = cp_parser_objc_selector (parser);

      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE))
	{
	  /* Detect if we have a unary selector.  */
	  if (maybe_unary_selector_p)
	    {
	      sel_seq = selector;
	      goto finish_selector;
	    }
	  else
	    {
	      cp_parser_error (parser, "expected %<:%>");
	    }
	}
      maybe_unary_selector_p = false;
      token = cp_lexer_consume_token (parser->lexer);

      if (token->type == CPP_SCOPE)
	{
	  /* A '::' counts as two consecutive ':'s, so record an
	     empty component in addition to this one.  */
	  sel_seq
	    = chainon (sel_seq,
		       build_tree_list (selector, NULL_TREE));
	  sel_seq
	    = chainon (sel_seq,
		       build_tree_list (NULL_TREE, NULL_TREE));
	}
      else
	sel_seq
	  = chainon (sel_seq,
		     build_tree_list (selector, NULL_TREE));

      token = cp_lexer_peek_token (parser->lexer);
    }

 finish_selector:
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_build_selector_expr (loc, sel_seq);
}
/* Parse a list of identifiers.

   objc-identifier-list:
     identifier
     objc-identifier-list , identifier

   Returns a TREE_LIST of identifier nodes.  */

static tree
cp_parser_objc_identifier_list (cp_parser* parser)
{
  tree name = cp_parser_identifier (parser);
  tree result;

  if (name == error_mark_node)
    return error_mark_node;

  result = build_tree_list (NULL_TREE, name);
  while (cp_lexer_peek_token (parser->lexer)->type == CPP_COMMA)
    {
      /* Eat ','.  */
      cp_lexer_consume_token (parser->lexer);
      name = cp_parser_identifier (parser);
      /* On a bad identifier, return what we have so far.  */
      if (name == error_mark_node)
	return result;

      result = chainon (result, build_tree_list (NULL_TREE, name));
    }

  return result;
}
/* Parse an Objective-C alias declaration.

  objc-alias-declaration:
    @compatibility_alias identifier identifier ;

  This function registers the alias mapping with the Objective-C front end.
  It returns nothing.  */

static void
cp_parser_objc_alias_declaration (cp_parser* parser)
{
  tree alias_name, original_name;

  /* Eat '@compatibility_alias'.  */
  cp_lexer_consume_token (parser->lexer);
  alias_name = cp_parser_identifier (parser);
  original_name = cp_parser_identifier (parser);
  objc_declare_alias (alias_name, original_name);
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* Parse an Objective-C class forward-declaration.

   objc-class-declaration:
     @class objc-identifier-list ;

   The function registers the forward declarations with the Objective-C
   front end.  It returns nothing.  */

static void
cp_parser_objc_class_declaration (cp_parser* parser)
{
  /* Eat '@class'.  */
  cp_lexer_consume_token (parser->lexer);

  for (;;)
    {
      tree name = cp_parser_identifier (parser);

      /* Stop on a bad identifier; the semicolon scan below recovers.  */
      if (name == error_mark_node)
	break;

      objc_declare_class (name);

      /* Another class name only follows after a ','.  */
      if (!cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }

  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* Parse a list of Objective-C protocol references.

   objc-protocol-refs-opt:
     objc-protocol-refs [opt]

   objc-protocol-refs:
     < objc-identifier-list >

   Returns a TREE_LIST of identifiers, if any.  */

static tree
cp_parser_objc_protocol_refs_opt (cp_parser* parser)
{
  /* The reference list is optional: no '<' means no protocols.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    return NULL_TREE;

  /* Eat '<'.  */
  cp_lexer_consume_token (parser->lexer);
  {
    tree protorefs = cp_parser_objc_identifier_list (parser);
    cp_parser_require (parser, CPP_GREATER, RT_GREATER);
    return protorefs;
  }
}
/* Parse a Objective-C visibility specification.  */

static void
cp_parser_objc_visibility_spec (cp_parser* parser)
{
  cp_token *vis = cp_lexer_peek_token (parser->lexer);

  if (vis->keyword == RID_AT_PRIVATE)
    objc_set_visibility (OBJC_IVAR_VIS_PRIVATE);
  else if (vis->keyword == RID_AT_PROTECTED)
    objc_set_visibility (OBJC_IVAR_VIS_PROTECTED);
  else if (vis->keyword == RID_AT_PUBLIC)
    objc_set_visibility (OBJC_IVAR_VIS_PUBLIC);
  else if (vis->keyword == RID_AT_PACKAGE)
    objc_set_visibility (OBJC_IVAR_VIS_PACKAGE);
  else
    /* Not a visibility keyword; leave the token alone.  */
    return;

  /* Eat '@private'/'@protected'/'@public'.  */
  cp_lexer_consume_token (parser->lexer);
}
/* Parse an Objective-C method type.  Return 'true' if it is a class
   (+) method, and 'false' if it is an instance (-) method.  */

static inline bool
cp_parser_objc_method_type (cp_parser* parser)
{
  /* Consume the '+' or '-'; '+' marks a class method.  */
  return cp_lexer_consume_token (parser->lexer)->type == CPP_PLUS;
}
/* Parse an Objective-C protocol qualifier.  */

static tree
cp_parser_objc_protocol_qualifiers (cp_parser* parser)
{
  tree quals = NULL_TREE, node;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  node = token->u.value;

  /* Collect consecutive qualifier keywords (in, out, inout, bycopy,
     byref, oneway).  Each one is consed onto QUALS, so the resulting
     list is in reverse source order.  */
  while (node && TREE_CODE (node) == IDENTIFIER_NODE
	 && (node == ridpointers [(int) RID_IN]
	     || node == ridpointers [(int) RID_OUT]
	     || node == ridpointers [(int) RID_INOUT]
	     || node == ridpointers [(int) RID_BYCOPY]
	     || node == ridpointers [(int) RID_BYREF]
	     || node == ridpointers [(int) RID_ONEWAY]))
    {
      quals = tree_cons (NULL_TREE, node, quals);
      cp_lexer_consume_token (parser->lexer);
      token = cp_lexer_peek_token (parser->lexer);
      node = token->u.value;
    }

  return quals;
}
/* Parse an Objective-C typename.  Returns NULL_TREE when no
   parenthesized typename is present; otherwise a TREE_LIST whose
   TREE_PURPOSE is the protocol-qualifier list and whose TREE_VALUE is
   the parsed type (possibly NULL_TREE, meaning the default 'id').  */

static tree
cp_parser_objc_typename (cp_parser* parser)
{
  tree type_name = NULL_TREE;

  /* Fetch the type name.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree proto_quals, cp_type = NULL_TREE;

      cp_lexer_consume_token (parser->lexer);  /* Eat '('.  */
      proto_quals = cp_parser_objc_protocol_qualifiers (parser);

      /* An ObjC type name may consist of just protocol qualifiers, in which
	 case the type shall default to 'id'.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
	{
	  cp_type = cp_parser_type_id (parser);
	  
	  /* If the type could not be parsed, an error has already
	     been produced.  For error recovery, behave as if it had
	     not been specified, which will use the default type
	     'id'.  */
	  if (cp_type == error_mark_node)
	    {
	      cp_type = NULL_TREE;
	      /* We need to skip to the closing parenthesis as
		 cp_parser_type_id() does not seem to do it for
		 us.  */
	      cp_parser_skip_to_closing_parenthesis (parser,
						     /*recovering=*/true,
						     /*or_comma=*/false,
						     /*consume_paren=*/false);
	    }
	}

      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      type_name = build_tree_list (proto_quals, cp_type);
    }

  return type_name;
}
/* Check to see if TYPE refers to an Objective-C selector name. */
static bool
cp_parser_objc_selector_p (enum cpp_ttype type)
{
return (type == CPP_NAME || type == CPP_KEYWORD
|| type == CPP_AND_AND || type == CPP_AND_EQ || type == CPP_AND
|| type == CPP_OR || type == CPP_COMPL || type == CPP_NOT
|| type == CPP_NOT_EQ || type == CPP_OR_OR || type == CPP_OR_EQ
|| type == CPP_XOR || type == CPP_XOR_EQ);
}
/* Parse an Objective-C selector.  Consumes one token; returns the
   selector identifier, or error_mark_node for an invalid token.  */

static tree
cp_parser_objc_selector (cp_parser* parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);

  if (!cp_parser_objc_selector_p (token->type))
    {
      error_at (token->location, "invalid Objective-C++ selector name");
      return error_mark_node;
    }

  /* C++ operator names are allowed to appear in ObjC selectors.  Map
     the alternative operator tokens back to their identifier
     spellings.  */
  switch (token->type)
    {
    case CPP_AND_AND: return get_identifier ("and");
    case CPP_AND_EQ: return get_identifier ("and_eq");
    case CPP_AND: return get_identifier ("bitand");
    case CPP_OR: return get_identifier ("bitor");
    case CPP_COMPL: return get_identifier ("compl");
    case CPP_NOT: return get_identifier ("not");
    case CPP_NOT_EQ: return get_identifier ("not_eq");
    case CPP_OR_OR: return get_identifier ("or");
    case CPP_OR_EQ: return get_identifier ("or_eq");
    case CPP_XOR: return get_identifier ("xor");
    case CPP_XOR_EQ: return get_identifier ("xor_eq");
    default: return token->u.value;
    }
}
/* Parse an Objective-C params list (the keyword-selector components of
   a method declaration).  On success returns either a chain of keyword
   decls built by objc_build_keyword_decl, or a bare selector identifier
   for a unary method.  Any trailing __attribute__ list is stored in
   *ATTRIBUTES.  Returns error_mark_node when no declaration is present
   or the attributes are misplaced.

   Fix: removed the duplicated `params == NULL_TREE' check that
   followed the attribute handling -- PARAMS was already verified
   non-NULL above and is not modified in between, so that second check
   was unreachable dead code.  */

static tree
cp_parser_objc_method_keyword_params (cp_parser* parser, tree* attributes)
{
  tree params = NULL_TREE;
  /* True only before the first component; a selector with no ':' at
     all is a unary method.  */
  bool maybe_unary_selector_p = true;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON)
    {
      tree selector = NULL_TREE, type_name, identifier;
      tree parm_attr = NULL_TREE;

      /* An __attribute__ here belongs to the method, not a parameter;
	 handle it after the loop.  */
      if (token->keyword == RID_ATTRIBUTE)
	break;

      /* A bare ':' means this component has no selector name.  */
      if (token->type != CPP_COLON)
	selector = cp_parser_objc_selector (parser);

      /* Detect if we have a unary selector.  */
      if (maybe_unary_selector_p
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
	{
	  params = selector; /* Might be followed by attributes.  */
	  break;
	}

      maybe_unary_selector_p = false;
      if (!cp_parser_require (parser, CPP_COLON, RT_COLON))
	{
	  /* Something went quite wrong.  There should be a colon
	     here, but there is not.  Stop parsing parameters.  */
	  break;
	}
      type_name = cp_parser_objc_typename (parser);
      /* New ObjC allows attributes on parameters too.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
	parm_attr = cp_parser_attributes_opt (parser);
      identifier = cp_parser_identifier (parser);

      params
	= chainon (params,
		   objc_build_keyword_decl (selector,
					    type_name,
					    identifier,
					    parm_attr));

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (params == NULL_TREE)
    {
      cp_parser_error (parser, "objective-c++ method declaration is expected");
      return error_mark_node;
    }

  /* We allow tail attributes for the method.  */
  if (token->keyword == RID_ATTRIBUTE)
    {
      *attributes = cp_parser_attributes_opt (parser);
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	  || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	return params;
      cp_parser_error (parser,
		       "method attributes must be specified at the end");
      return error_mark_node;
    }

  return params;
}
/* Parse the non-keyword Objective-C params: the optional trailing
   ", parm, parm, ..." of a method declaration, possibly ending in
   '...'.  *ELLIPSISP is set to true iff a final '...' was seen.
   *ATTRIBUTES may receive method attributes parsed at the very end;
   finding attributes here when *ATTRIBUTES was already filled in by
   the keyword-params parser is an error.  Returns a TREE_LIST of the
   extra parameters (the head node itself carries no parameter), or
   error_mark_node for misplaced attributes.  */
static tree
cp_parser_objc_method_tail_params_opt (cp_parser* parser, bool *ellipsisp,
				       tree* attributes)
{
  tree params = make_node (TREE_LIST);
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  *ellipsisp = false; /* Initially, assume no ellipsis.  */
  while (token->type == CPP_COMMA)
    {
      cp_parameter_declarator *parmdecl;
      tree parm;
      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_ELLIPSIS)
	{
	  cp_lexer_consume_token (parser->lexer);  /* Eat '...'.  */
	  *ellipsisp = true;
	  token = cp_lexer_peek_token (parser->lexer);
	  break;
	}
      /* TODO: parse attributes for tail parameters.  */
      parmdecl = cp_parser_parameter_declaration (parser, false, NULL);
      parm = grokdeclarator (parmdecl->declarator,
			     &parmdecl->decl_specifiers,
			     PARM, /*initialized=*/0,
			     /*attrlist=*/NULL);
      chainon (params, build_tree_list (NULL_TREE, parm));
      token = cp_lexer_peek_token (parser->lexer);
    }
  /* We allow tail attributes for the method.  */
  if (token->keyword == RID_ATTRIBUTE)
    {
      if (*attributes == NULL_TREE)
	{
	  *attributes = cp_parser_attributes_opt (parser);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	      || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	    return params;
	}
      else
	/* We have an error, but parse the attributes, so that we can
	   carry on.  */
	*attributes = cp_parser_attributes_opt (parser);
      cp_parser_error (parser,
		       "method attributes must be specified at the end");
      return error_mark_node;
    }
  return params;
}
/* Parse a linkage specification, a pragma, an extra semicolon or a
   block appearing between Objective-C method declarations inside an
   @interface/@implementation/@protocol.  Also handles the
   @optional/@required markers and stray braces; anything else falls
   through to the ordinary C++ block-declaration parser.  NOTE: the
   tests below are ordered; each branch assumes the earlier ones did
   not match.  */
static void
cp_parser_objc_interstitial_code (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  /* If the next token is `extern' and the following token is a string
     literal, then we have a linkage specification.  */
  if (token->keyword == RID_EXTERN
      && cp_parser_is_pure_string_literal
	 (cp_lexer_peek_nth_token (parser->lexer, 2)))
    cp_parser_linkage_specification (parser);
  /* Handle #pragma, if any.  */
  else if (token->type == CPP_PRAGMA)
    cp_parser_pragma (parser, pragma_external);
  /* Allow stray semicolons.  */
  else if (token->type == CPP_SEMICOLON)
    cp_lexer_consume_token (parser->lexer);
  /* Mark methods as optional or required, when building protocols.  */
  else if (token->keyword == RID_AT_OPTIONAL)
    {
      cp_lexer_consume_token (parser->lexer);
      objc_set_method_opt (true);
    }
  else if (token->keyword == RID_AT_REQUIRED)
    {
      cp_lexer_consume_token (parser->lexer);
      objc_set_method_opt (false);
    }
  else if (token->keyword == RID_NAMESPACE)
    cp_parser_namespace_definition (parser);
  /* Other stray characters must generate errors.  */
  else if (token->type == CPP_OPEN_BRACE || token->type == CPP_CLOSE_BRACE)
    {
      cp_lexer_consume_token (parser->lexer);
      error ("stray %qs between Objective-C++ methods",
	     token->type == CPP_OPEN_BRACE ? "{" : "}");
    }
  /* Finally, try to parse a block-declaration, or a function-definition.  */
  else
    cp_parser_block_declaration (parser, /*statement_p=*/false);
}
/* Parse a complete Objective-C method signature: the method-type
   marker ('+' or '-'), an optional return typename, the keyword
   parameters and any trailing comma-separated parameters.
   *ATTRIBUTES receives method attributes, if any were parsed.
   Returns the result of objc_build_method_signature, or
   error_mark_node if either parameter list failed to parse.  */

static tree
cp_parser_objc_method_signature (cp_parser* parser, tree* attributes)
{
  tree ret_type_name, keyword_parms, tail_parms;
  bool saw_ellipsis = false;
  bool class_method_p;

  class_method_p = cp_parser_objc_method_type (parser);
  ret_type_name = cp_parser_objc_typename (parser);
  *attributes = NULL_TREE;

  keyword_parms = cp_parser_objc_method_keyword_params (parser, attributes);
  if (keyword_parms == error_mark_node)
    return error_mark_node;

  tail_parms = cp_parser_objc_method_tail_params_opt (parser, &saw_ellipsis,
						      attributes);
  if (tail_parms == error_mark_node)
    return error_mark_node;

  return objc_build_method_signature (class_method_p, ret_type_name,
				      keyword_parms, tail_parms,
				      saw_ellipsis);
}
/* Tentatively parse a run of attributes and look at what follows.
   Returns true (leaving the attribute tokens consumed) when the
   attributes are immediately followed by a method introducer
   ('+' or '-'), which is not allowed.  Otherwise rolls the lexer
   back and returns false, so the attributes can be re-parsed as
   introducing interstitial code.  */
static bool
cp_parser_objc_method_maybe_bad_prefix_attributes (cp_parser* parser)
{
  tree attrs;
  bool method_follows_p;

  cp_lexer_save_tokens (parser->lexer);
  attrs = cp_parser_attributes_opt (parser);
  gcc_assert (attrs);

  method_follows_p
    = (cp_lexer_next_token_is (parser->lexer, CPP_PLUS)
       || cp_lexer_next_token_is (parser->lexer, CPP_MINUS));

  if (!method_follows_p)
    /* Rewind so the attributes can be parsed again from scratch.  */
    cp_lexer_rollback_tokens (parser->lexer);

  return method_follows_p;
}
/* Parse an Objective-C method prototype list: the sequence of method
   prototypes, @property declarations and interstitial code inside an
   @interface or @protocol, up to and including the terminating
   '@end'.  Finishes the interface via objc_finish_interface.  */
static void
cp_parser_objc_method_prototype_list (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  while (token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      if (token->type == CPP_PLUS || token->type == CPP_MINUS)
	{
	  tree attributes, sig;
	  bool is_class_method;
	  /* '+' introduces a class method, '-' an instance method.  */
	  if (token->type == CPP_PLUS)
	    is_class_method = true;
	  else
	    is_class_method = false;
	  sig = cp_parser_objc_method_signature (parser, &attributes);
	  if (sig == error_mark_node)
	    {
	      /* Skip the malformed prototype and resume at the next
		 construct.  */
	      cp_parser_skip_to_end_of_block_or_statement (parser);
	      token = cp_lexer_peek_token (parser->lexer);
	      continue;
	    }
	  objc_add_method_declaration (is_class_method, sig, attributes);
	  cp_parser_consume_semicolon_at_end_of_statement (parser);
	}
      else if (token->keyword == RID_AT_PROPERTY)
	cp_parser_objc_at_property_declaration (parser);
      else if (token->keyword == RID_ATTRIBUTE
	       && cp_parser_objc_method_maybe_bad_prefix_attributes(parser))
	/* Attributes directly before '+'/'-' are not supported.  */
	warning_at (cp_lexer_peek_token (parser->lexer)->location,
		    OPT_Wattributes,
		    "prefix attributes are ignored for methods");
      else
	/* Allow for interspersed non-ObjC++ code.  */
	cp_parser_objc_interstitial_code (parser);
      token = cp_lexer_peek_token (parser->lexer);
    }
  if (token->type != CPP_EOF)
    cp_lexer_consume_token (parser->lexer); /* Eat '@end'.  */
  else
    cp_parser_error (parser, "expected %<@end%>");
  objc_finish_interface ();
}
/* Parse an Objective-C method definition list: the method bodies,
   @property/@synthesize/@dynamic declarations and interstitial code
   inside an @implementation, up to and including the terminating
   '@end'.  Finishes the implementation via
   objc_finish_implementation.  */
static void
cp_parser_objc_method_definition_list (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  while (token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      tree meth;
      if (token->type == CPP_PLUS || token->type == CPP_MINUS)
	{
	  cp_token *ptk;
	  tree sig, attribute;
	  bool is_class_method;
	  /* '+' introduces a class method, '-' an instance method.  */
	  if (token->type == CPP_PLUS)
	    is_class_method = true;
	  else
	    is_class_method = false;
	  /* Defer access checks until we know whether a function body
	     follows the signature.  */
	  push_deferring_access_checks (dk_deferred);
	  sig = cp_parser_objc_method_signature (parser, &attribute);
	  if (sig == error_mark_node)
	    {
	      /* NOTE(review): the deferring context pushed above is
		 not popped on this error path — confirm whether that
		 is intentional.  */
	      cp_parser_skip_to_end_of_block_or_statement (parser);
	      token = cp_lexer_peek_token (parser->lexer);
	      continue;
	    }
	  objc_start_method_definition (is_class_method, sig, attribute,
					NULL_TREE);
	  /* For historical reasons, we accept an optional semicolon.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    cp_lexer_consume_token (parser->lexer);
	  ptk = cp_lexer_peek_token (parser->lexer);
	  /* Only parse a function body if one actually follows, i.e.
	     we are not already at the next method, EOF or '@end'.  */
	  if (!(ptk->type == CPP_PLUS || ptk->type == CPP_MINUS
		|| ptk->type == CPP_EOF || ptk->keyword == RID_AT_END))
	    {
	      perform_deferred_access_checks ();
	      stop_deferring_access_checks ();
	      meth = cp_parser_function_definition_after_declarator (parser,
								     false);
	      pop_deferring_access_checks ();
	      objc_finish_method_definition (meth);
	    }
	}
      /* The following case will be removed once @synthesize is
	 completely implemented.  */
      else if (token->keyword == RID_AT_PROPERTY)
	cp_parser_objc_at_property_declaration (parser);
      else if (token->keyword == RID_AT_SYNTHESIZE)
	cp_parser_objc_at_synthesize_declaration (parser);
      else if (token->keyword == RID_AT_DYNAMIC)
	cp_parser_objc_at_dynamic_declaration (parser);
      else if (token->keyword == RID_ATTRIBUTE
	       && cp_parser_objc_method_maybe_bad_prefix_attributes(parser))
	warning_at (token->location, OPT_Wattributes,
		    "prefix attributes are ignored for methods");
      else
	/* Allow for interspersed non-ObjC++ code.  */
	cp_parser_objc_interstitial_code (parser);
      token = cp_lexer_peek_token (parser->lexer);
    }
  if (token->type != CPP_EOF)
    cp_lexer_consume_token (parser->lexer); /* Eat '@end'.  */
  else
    cp_parser_error (parser, "expected %<@end%>");
  objc_finish_implementation ();
}
/* Parse Objective-C ivars: the optional brace-enclosed instance
   variable block of an @interface or @implementation.  Handles
   visibility specifiers, (possibly unnamed) bitfields and attributes;
   each declaration is registered via objc_add_instance_variable.
   Consumes up to and including the closing '}' (plus an optional
   trailing ';'), but deliberately leaves a terminating '@end'
   unconsumed.  */
static void
cp_parser_objc_class_ivars (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_OPEN_BRACE)
    return;	/* No ivars specified.  */
  cp_lexer_consume_token (parser->lexer); /* Eat '{'.  */
  token = cp_lexer_peek_token (parser->lexer);
  while (token->type != CPP_CLOSE_BRACE
	 && token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      cp_decl_specifier_seq declspecs;
      int decl_class_or_enum_p;
      tree prefix_attributes;
      /* Parse any visibility specifier for the following ivars.  */
      cp_parser_objc_visibility_spec (parser);
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	break;
      cp_parser_decl_specifier_seq (parser,
				    CP_PARSER_FLAGS_OPTIONAL,
				    &declspecs,
				    &decl_class_or_enum_p);
      /* Reject storage classes that make no sense for an ivar:
	 auto, register, static, extern, mutable.  */
      if (declspecs.storage_class != sc_none)
	{
	  cp_parser_error (parser, "invalid type for instance variable");
	  declspecs.storage_class = sc_none;
	}
      /* __thread.  */
      if (declspecs.specs[(int) ds_thread])
	{
	  cp_parser_error (parser, "invalid type for instance variable");
	  declspecs.specs[(int) ds_thread] = 0;
	}
      /* typedef.  */
      if (declspecs.specs[(int) ds_typedef])
	{
	  cp_parser_error (parser, "invalid type for instance variable");
	  declspecs.specs[(int) ds_typedef] = 0;
	}
      /* Attributes on the decl-specifiers apply to every declarator
	 in this declaration; detach them so they can be combined with
	 each declarator's own attributes below.  */
      prefix_attributes = declspecs.attributes;
      declspecs.attributes = NULL_TREE;
      /* Keep going until we hit the `;' at the end of the
	 declaration.  */
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  tree width = NULL_TREE, attributes, first_attribute, decl;
	  cp_declarator *declarator = NULL;
	  int ctor_dtor_or_conv_p;
	  /* Check for a (possibly unnamed) bitfield declaration.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  if (token->type == CPP_COLON)
	    /* Unnamed bitfield: jump straight to the width, leaving
	       DECLARATOR as NULL.  */
	    goto eat_colon;
	  if (token->type == CPP_NAME
	      && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_COLON))
	    {
	      /* Get the name of the bitfield.  */
	      declarator = make_id_declarator (NULL_TREE,
					       cp_parser_identifier (parser),
					       sfk_none);
	     eat_colon:
	      cp_lexer_consume_token (parser->lexer); /* Eat ':'.  */
	      /* Get the width of the bitfield.  */
	      width
		= cp_parser_constant_expression (parser,
						 /*allow_non_constant=*/false,
						 NULL);
	    }
	  else
	    {
	      /* Parse the declarator.  */
	      declarator
		= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					&ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					/*member_p=*/false);
	    }
	  /* Look for attributes that apply to the ivar.  */
	  attributes = cp_parser_attributes_opt (parser);
	  /* Remember which attributes are prefix attributes and
	     which are not.  */
	  first_attribute = attributes;
	  /* Combine the attributes.  */
	  attributes = chainon (prefix_attributes, attributes);
	  if (width)
	    /* Create the bitfield declaration.  */
	    decl = grokbitfield (declarator, &declspecs,
				 width,
				 attributes);
	  else
	    decl = grokfield (declarator, &declspecs,
			      NULL_TREE, /*init_const_expr_p=*/false,
			      NULL_TREE, attributes);
	  /* Add the instance variable.  */
	  if (decl != error_mark_node && decl != NULL_TREE)
	    objc_add_instance_variable (decl);
	  /* Reset PREFIX_ATTRIBUTES: walk to the end of the prefix
	     part and detach the suffix attributes chained on above,
	     so the prefix list can be reused for the next
	     declarator.  */
	  while (attributes && TREE_CHAIN (attributes) != first_attribute)
	    attributes = TREE_CHAIN (attributes);
	  if (attributes)
	    TREE_CHAIN (attributes) = NULL_TREE;
	  token = cp_lexer_peek_token (parser->lexer);
	  if (token->type == CPP_COMMA)
	    {
	      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
	      continue;
	    }
	  break;
	}
      cp_parser_consume_semicolon_at_end_of_statement (parser);
      token = cp_lexer_peek_token (parser->lexer);
    }
  if (token->keyword == RID_AT_END)
    cp_parser_error (parser, "expected %<}%>");
  /* Do not consume the RID_AT_END, so it will be read again as terminating
     the @interface or @implementation.  */
  if (token->keyword != RID_AT_END && token->type != CPP_EOF)
    cp_lexer_consume_token (parser->lexer); /* Eat '}'.  */
  /* For historical reasons, we accept an optional semicolon.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    cp_lexer_consume_token (parser->lexer);
}
/* Parse an Objective-C protocol declaration, beginning at
   '@protocol'.  This is either a forward declaration
   ("@protocol A, B;") or a full definition terminated by '@end'.
   ATTRIBUTES are prefix attributes to attach to the protocol(s).  */
static void
cp_parser_objc_protocol_declaration (cp_parser* parser, tree attributes)
{
  cp_token *tok;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@protocol'.  */

  if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME))
    {
      tok = cp_lexer_peek_token (parser->lexer);
      error_at (tok->location, "identifier expected after %<@protocol%>");
      cp_parser_consume_semicolon_at_end_of_statement (parser);
      return;
    }

  /* A ',' or ';' after the first identifier marks a forward
     declaration; anything else starts a full definition.  */
  tok = cp_lexer_peek_nth_token (parser->lexer, 2);
  if (tok->type == CPP_COMMA || tok->type == CPP_SEMICOLON)
    {
      /* Declare every name in the comma-separated list.  */
      while (true)
	{
	  tree id = cp_parser_identifier (parser);
	  if (id == error_mark_node)
	    break;
	  objc_declare_protocol (id, attributes);
	  if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	    break;
	  cp_lexer_consume_token (parser->lexer);
	}
      cp_parser_consume_semicolon_at_end_of_statement (parser);
    }
  else
    {
      /* A full-fledged protocol definition (or at least it should
	 be).  */
      tree proto = cp_parser_identifier (parser);
      tree protorefs = cp_parser_objc_protocol_refs_opt (parser);
      objc_start_protocol (proto, protorefs, attributes);
      cp_parser_objc_method_prototype_list (parser);
    }
}
/* Parse the optional superclass (": Name") or category ("(Name)")
   part of an @interface or @implementation line.  On return *SUPER
   and *CATEG hold the parsed identifiers or NULL_TREE, and
   *IS_CLASS_EXTENSION is true for the empty-parenthesis "()"
   class-extension form, which is only recognized when IFACE_P (i.e.
   for an @interface).  */
static void
cp_parser_objc_superclass_or_category (cp_parser *parser,
				       bool iface_p,
				       tree *super,
				       tree *categ, bool *is_class_extension)
{
  cp_token *tok = cp_lexer_peek_token (parser->lexer);

  *super = NULL_TREE;
  *categ = NULL_TREE;
  *is_class_extension = false;

  if (tok->type == CPP_COLON)
    {
      /* ": SuperclassName".  */
      cp_lexer_consume_token (parser->lexer);
      *super = cp_parser_identifier (parser);
    }
  else if (tok->type == CPP_OPEN_PAREN)
    {
      /* "(CategoryName)", or the class-extension form "()".  */
      cp_lexer_consume_token (parser->lexer);
      if (iface_p && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
	*is_class_extension = true;
      else
	*categ = cp_parser_identifier (parser);
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
    }
}
/* Parse an Objective-C class interface, beginning at '@interface'.
   ATTRIBUTES are prefix attributes to attach to the class, category
   or class extension.  */
static void
cp_parser_objc_class_interface (cp_parser* parser, tree attributes)
{
  tree name, super, categ, protos;
  bool is_class_extension;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@interface'.  */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    /* Recovery is hopeless without knowing which class this refers
       to; treat the '@interface' as a stray token and give up.  */
    return;

  cp_parser_objc_superclass_or_category (parser, true, &super, &categ,
					 &is_class_extension);
  protos = cp_parser_objc_protocol_refs_opt (parser);

  /* We have either a class or a category on our hands.  */
  if (categ || is_class_extension)
    objc_start_category_interface (name, categ, protos, attributes);
  else
    {
      objc_start_class_interface (name, super, protos, attributes);
      /* Handle instance variable declarations, if any.  */
      cp_parser_objc_class_ivars (parser);
      objc_continue_interface ();
    }

  cp_parser_objc_method_prototype_list (parser);
}
/* Parse an Objective-C class implementation, beginning at
   '@implementation'.  */
static void
cp_parser_objc_class_implementation (cp_parser* parser)
{
  tree name, super, categ;
  bool is_class_extension;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@implementation'.  */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    /* Recovery is hopeless without knowing which class this refers
       to; treat the '@implementation' as a stray token and give
       up.  */
    return;

  cp_parser_objc_superclass_or_category (parser, false, &super, &categ,
					 &is_class_extension);

  /* We have either a class or a category on our hands.  */
  if (categ)
    objc_start_category_implementation (name, categ);
  else
    {
      objc_start_class_implementation (name, super);
      /* Handle instance variable declarations, if any.  */
      cp_parser_objc_class_ivars (parser);
      objc_continue_implementation ();
    }

  cp_parser_objc_method_definition_list (parser);
}
/* Consume the @end token and finish off the implementation.  Reached
   via cp_parser_objc_declaration when an '@end' is seen at
   declaration scope.  */
static void
cp_parser_objc_end_implementation (cp_parser* parser)
{
  cp_lexer_consume_token (parser->lexer); /* Eat '@end'.  */
  objc_finish_implementation ();
}
/* Parse an Objective-C declaration by dispatching on the @-keyword
   that begins it (@alias, @class, @protocol, @interface,
   @implementation or @end).  ATTRIBUTES are prefix attributes, which
   only @protocol and @interface accept; attributes before the other
   keywords are diagnosed and dropped.  */
static void
cp_parser_objc_declaration (cp_parser* parser, tree attributes)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);
  /* Diagnose and discard attributes on constructs that cannot take
     them.  */
  if (attributes)
    switch (kwd->keyword)
      {
      case RID_AT_ALIAS:
      case RID_AT_CLASS:
      case RID_AT_END:
	error_at (kwd->location, "attributes may not be specified before"
		  " the %<@%D%> Objective-C++ keyword",
		  kwd->u.value);
	attributes = NULL;
	break;
      case RID_AT_IMPLEMENTATION:
	warning_at (kwd->location, OPT_Wattributes,
		    "prefix attributes are ignored before %<@%D%>",
		    kwd->u.value);
	attributes = NULL;
	/* FALLTHRU */
      default:
	break;
      }
  switch (kwd->keyword)
    {
    case RID_AT_ALIAS:
      cp_parser_objc_alias_declaration (parser);
      break;
    case RID_AT_CLASS:
      cp_parser_objc_class_declaration (parser);
      break;
    case RID_AT_PROTOCOL:
      cp_parser_objc_protocol_declaration (parser, attributes);
      break;
    case RID_AT_INTERFACE:
      cp_parser_objc_class_interface (parser, attributes);
      break;
    case RID_AT_IMPLEMENTATION:
      cp_parser_objc_class_implementation (parser);
      break;
    case RID_AT_END:
      cp_parser_objc_end_implementation (parser);
      break;
    default:
      error_at (kwd->location, "misplaced %<@%D%> Objective-C++ construct",
		kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }
}
/* Parse an Objective-C try-catch-finally statement.

   objc-try-catch-finally-stmt:
     @try compound-statement objc-catch-clause-seq [opt]
       objc-finally-clause [opt]

   objc-catch-clause-seq:
     objc-catch-clause objc-catch-clause-seq [opt]

   objc-catch-clause:
     @catch ( objc-exception-declaration ) compound-statement

   objc-finally-clause:
     @finally compound-statement

   objc-exception-declaration:
     parameter-declaration
     '...'

   where '...' is to be interpreted literally, that is, it means CPP_ELLIPSIS.

   Returns NULL_TREE.

   PS: This function is identical to c_parser_objc_try_catch_finally_statement
   for C.  Keep them in sync.  */
static tree
cp_parser_objc_try_catch_finally_statement (cp_parser *parser)
{
  location_t location;
  tree stmt;
  cp_parser_require_keyword (parser, RID_AT_TRY, RT_AT_TRY);
  location = cp_lexer_peek_token (parser->lexer)->location;
  objc_maybe_warn_exceptions (location);
  /* NB: The @try block needs to be wrapped in its own STATEMENT_LIST
     node, lest it get absorbed into the surrounding block.  */
  stmt = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false, false);
  objc_begin_try_stmt (location, pop_stmt_list (stmt));
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_CATCH))
    {
      cp_parameter_declarator *parm;
      tree parameter_declaration = error_mark_node;
      /* SEEN_OPEN_PAREN remembers whether the '(' was really present,
	 which drives the error recovery for ')' below.  */
      bool seen_open_paren = false;
      cp_lexer_consume_token (parser->lexer);
      if (cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	seen_open_paren = true;
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* We have "@catch (...)" (where the '...' are literally
	     what is in the code).  Skip the '...'.
	     parameter_declaration is set to NULL_TREE, and
	     objc_begin_catch_clause() knows that that means
	     '...'.  */
	  cp_lexer_consume_token (parser->lexer);
	  parameter_declaration = NULL_TREE;
	}
      else
	{
	  /* We have "@catch (NSException *exception)" or something
	     like that.  Parse the parameter declaration.  */
	  parm = cp_parser_parameter_declaration (parser, false, NULL);
	  if (parm == NULL)
	    parameter_declaration = error_mark_node;
	  else
	    parameter_declaration = grokdeclarator (parm->declarator,
						    &parm->decl_specifiers,
						    PARM, /*initialized=*/0,
						    /*attrlist=*/NULL);
	}
      if (seen_open_paren)
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      else
	{
	  /* If there was no open parenthesis, we are recovering from
	     an error, and we are trying to figure out what mistake
	     the user has made.  */

	  /* If there is an immediate closing parenthesis, the user
	     probably forgot the opening one (ie, they typed "@catch
	     NSException *e)".  Parse the closing parenthesis and keep
	     going.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
	    cp_lexer_consume_token (parser->lexer);

	  /* If there is no immediate closing parenthesis, the user
	     probably doesn't know that parenthesis are required at
	     all (ie, they typed "@catch NSException *e").  So, just
	     forget about the closing parenthesis and keep going.  */
	}
      objc_begin_catch_clause (parameter_declaration);
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_finish_catch_clause ();
    }
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_FINALLY))
    {
      cp_lexer_consume_token (parser->lexer);
      location = cp_lexer_peek_token (parser->lexer)->location;
      /* NB: The @finally block needs to be wrapped in its own STATEMENT_LIST
	 node, lest it get absorbed into the surrounding block.  */
      stmt = push_stmt_list ();
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_build_finally_clause (location, pop_stmt_list (stmt));
    }
  return objc_finish_try_stmt ();
}
/* Parse an Objective-C synchronized statement.

   objc-synchronized-stmt:
     @synchronized ( expression ) compound-statement

   Returns NULL_TREE.  */
static tree
cp_parser_objc_synchronized_statement (cp_parser *parser)
{
  tree lock_expr, body;
  location_t loc;

  cp_parser_require_keyword (parser, RID_AT_SYNCHRONIZED, RT_AT_SYNCHRONIZED);
  loc = cp_lexer_peek_token (parser->lexer)->location;
  objc_maybe_warn_exceptions (loc);

  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  lock_expr = cp_parser_expression (parser, false, NULL);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  /* NB: Wrap the body in its own STATEMENT_LIST node, lest it get
     absorbed into the surrounding block.  */
  body = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false, false);
  return objc_build_synchronized (loc, lock_expr, pop_stmt_list (body));
}
/* Parse an Objective-C throw statement.

   objc-throw-stmt:
     @throw assignment-expression [opt] ;

   Returns a constructed '@throw' statement.  When no expression
   precedes the ';', EXPR stays NULL_TREE and objc_build_throw_stmt
   receives it as such.  */
static tree
cp_parser_objc_throw_statement (cp_parser *parser)
{
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
  tree expr = NULL_TREE;

  cp_parser_require_keyword (parser, RID_AT_THROW, RT_AT_THROW);
  if (!cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
  cp_parser_consume_semicolon_at_end_of_statement (parser);
  return objc_build_throw_stmt (loc, expr);
}
/* Parse an Objective-C statement: @try, @synchronized or @throw.
   Any other @-keyword is diagnosed as misplaced, skipped over, and
   yields error_mark_node.  */
static tree
cp_parser_objc_statement (cp_parser * parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  switch (token->keyword)
    {
    case RID_AT_TRY:
      return cp_parser_objc_try_catch_finally_statement (parser);
    case RID_AT_SYNCHRONIZED:
      return cp_parser_objc_synchronized_statement (parser);
    case RID_AT_THROW:
      return cp_parser_objc_throw_statement (parser);
    default:
      break;
    }

  error_at (token->location, "misplaced %<@%D%> Objective-C++ construct",
	    token->u.value);
  cp_parser_skip_to_end_of_block_or_statement (parser);
  return error_mark_node;
}
/* If we are compiling ObjC++ and we see an __attribute__ we need to
   look ahead to see if an objc keyword follows the attributes.  This
   is to detect the use of prefix attributes on ObjC @interface and
   @protocol.  Tentatively parses the attributes into *ATTRIB; when an
   @-keyword follows, the tokens are committed and true is returned,
   otherwise the lexer is rolled back and false is returned.  */
static bool
cp_parser_objc_valid_prefix_attributes (cp_parser* parser, tree *attrib)
{
  cp_lexer_save_tokens (parser->lexer);
  *attrib = cp_parser_attributes_opt (parser);
  gcc_assert (*attrib);
  if (!OBJC_IS_AT_KEYWORD (cp_lexer_peek_token (parser->lexer)->keyword))
    {
      cp_lexer_rollback_tokens (parser->lexer);
      return false;
    }
  cp_lexer_commit_tokens (parser->lexer);
  return true;
}
/* This routine is a minimal replacement for
   c_parser_struct_declaration () used when parsing the list of
   types/names or ObjC++ properties.  For example, when parsing the
   code

   @property (readonly) int a, b, c;

   this function is responsible for parsing "int a, int b, int c" and
   returning the declarations as CHAIN of DECLs.  The chain is built
   in reverse source order; the caller is expected to nreverse it.
   Returns error_mark_node on any parse or declaration failure.

   TODO: Share this code with cp_parser_objc_class_ivars.  It's very
   similar parsing.  */
static tree
cp_parser_objc_struct_declaration (cp_parser *parser)
{
  tree decls = NULL_TREE;
  cp_decl_specifier_seq declspecs;
  int decl_class_or_enum_p;
  tree prefix_attributes;
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_NONE,
				&declspecs,
				&decl_class_or_enum_p);
  if (declspecs.type == error_mark_node)
    return error_mark_node;
  /* Reject specifiers that make no sense for a property:
     auto, register, static, extern, mutable.  */
  if (declspecs.storage_class != sc_none)
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.storage_class = sc_none;
    }
  /* __thread.  */
  if (declspecs.specs[(int) ds_thread])
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.specs[(int) ds_thread] = 0;
    }
  /* typedef.  */
  if (declspecs.specs[(int) ds_typedef])
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.specs[(int) ds_typedef] = 0;
    }
  /* Attributes on the decl-specifiers apply to every declarator in
     the declaration; detach them for recombination below.  */
  prefix_attributes = declspecs.attributes;
  declspecs.attributes = NULL_TREE;
  /* Keep going until we hit the `;' at the end of the declaration.  */
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      tree attributes, first_attribute, decl;
      cp_declarator *declarator;
      cp_token *token;
      /* Parse the declarator.  */
      declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					 NULL, NULL, false);
      /* Look for attributes that apply to the ivar.  */
      attributes = cp_parser_attributes_opt (parser);
      /* Remember which attributes are prefix attributes and
	 which are not.  */
      first_attribute = attributes;
      /* Combine the attributes.  */
      attributes = chainon (prefix_attributes, attributes);
      decl = grokfield (declarator, &declspecs,
			NULL_TREE, /*init_const_expr_p=*/false,
			NULL_TREE, attributes);
      if (decl == error_mark_node || decl == NULL_TREE)
	return error_mark_node;
      /* Reset PREFIX_ATTRIBUTES: detach the suffix attributes we
	 chained on above so the prefix list can be reused for the
	 next declarator.  */
      while (attributes && TREE_CHAIN (attributes) != first_attribute)
	attributes = TREE_CHAIN (attributes);
      if (attributes)
	TREE_CHAIN (attributes) = NULL_TREE;
      /* Prepend, producing a reverse-order chain.  */
      DECL_CHAIN (decl) = decls;
      decls = decl;
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_COMMA)
	{
	  cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
	  continue;
	}
      else
	break;
    }
  return decls;
}
/* Parse an Objective-C @property declaration.  The syntax is:

   objc-property-declaration:
     '@property' objc-property-attributes[opt] struct-declaration ;

   objc-property-attributes:
     '(' objc-property-attribute-list ')'

   objc-property-attribute-list:
     objc-property-attribute
     objc-property-attribute-list, objc-property-attribute

   objc-property-attribute
     'getter' = identifier
     'setter' = identifier
     'readonly'
     'readwrite'
     'assign'
     'retain'
     'copy'
     'nonatomic'

   For example:
     @property NSString *name;
     @property (readonly) id object;
     @property (retain, nonatomic, getter=getTheName) id name;
     @property int a, b, c;

   PS: This function is identical to
   c_parser_objc_at_property_declaration for C.  Keep them in sync.  */
static void
cp_parser_objc_at_property_declaration (cp_parser *parser)
{
  /* The following variables hold the attributes of the properties as
     parsed.  They are 'false' or 'NULL_TREE' if the attribute was not
     seen.  When we see an attribute, we set them to 'true' (if they
     are boolean properties) or to the identifier (if they have an
     argument, ie, for getter and setter).  Note that here we only
     parse the list of attributes, check the syntax and accumulate the
     attributes that we find.  objc_add_property_declaration() will
     then process the information.  */
  bool property_assign = false;
  bool property_copy = false;
  tree property_getter_ident = NULL_TREE;
  bool property_nonatomic = false;
  bool property_readonly = false;
  bool property_readwrite = false;
  bool property_retain = false;
  tree property_setter_ident = NULL_TREE;
  /* 'properties' is the list of properties that we read.  Usually a
     single one, but maybe more (eg, in "@property int a, b, c;" there
     are three).  */
  tree properties;
  location_t loc;
  loc = cp_lexer_peek_token (parser->lexer)->location;
  cp_lexer_consume_token (parser->lexer);  /* Eat '@property'.  */
  /* Parse the optional attribute list...  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      /* Eat the '('.  */
      cp_lexer_consume_token (parser->lexer);
      while (true)
	{
	  bool syntax_error = false;
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  enum rid keyword;
	  if (token->type != CPP_NAME)
	    {
	      cp_parser_error (parser, "expected identifier");
	      break;
	    }
	  keyword = C_RID_CODE (token->u.value);
	  cp_lexer_consume_token (parser->lexer);
	  switch (keyword)
	    {
	    case RID_ASSIGN:    property_assign = true;    break;
	    case RID_COPY:      property_copy = true;      break;
	    case RID_NONATOMIC: property_nonatomic = true; break;
	    case RID_READONLY:  property_readonly = true;  break;
	    case RID_READWRITE: property_readwrite = true; break;
	    case RID_RETAIN:    property_retain = true;    break;

	    case RID_GETTER:
	    case RID_SETTER:
	      /* Both forms take "= identifier"; a setter's identifier
		 must additionally end in ':'.  */
	      if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ))
		{
		  if (keyword == RID_GETTER)
		    cp_parser_error (parser,
				     "missing %<=%> (after %<getter%> attribute)");
		  else
		    cp_parser_error (parser,
				     "missing %<=%> (after %<setter%> attribute)");
		  syntax_error = true;
		  break;
		}
	      cp_lexer_consume_token (parser->lexer); /* eat the = */
	      if (!cp_parser_objc_selector_p (cp_lexer_peek_token (parser->lexer)->type))
		{
		  cp_parser_error (parser, "expected identifier");
		  syntax_error = true;
		  break;
		}
	      if (keyword == RID_SETTER)
		{
		  if (property_setter_ident != NULL_TREE)
		    {
		      cp_parser_error (parser, "the %<setter%> attribute may only be specified once");
		      cp_lexer_consume_token (parser->lexer);
		    }
		  else
		    property_setter_ident = cp_parser_objc_selector (parser);
		  if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
		    cp_parser_error (parser, "setter name must terminate with %<:%>");
		  else
		    cp_lexer_consume_token (parser->lexer);
		}
	      else
		{
		  if (property_getter_ident != NULL_TREE)
		    {
		      cp_parser_error (parser, "the %<getter%> attribute may only be specified once");
		      cp_lexer_consume_token (parser->lexer);
		    }
		  else
		    property_getter_ident = cp_parser_objc_selector (parser);
		}
	      break;
	    default:
	      cp_parser_error (parser, "unknown property attribute");
	      syntax_error = true;
	      break;
	    }
	  if (syntax_error)
	    break;
	  /* Attributes are comma-separated; anything else ends the
	     list.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    cp_lexer_consume_token (parser->lexer);
	  else
	    break;
	}
      /* FIXME: "@property (setter, assign);" will generate a spurious
	 "error: expected ')' before ',' token".  This is because
	 cp_parser_require, unlike the C counterpart, will produce an
	 error even if we are in error recovery.  */
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	{
	  cp_parser_skip_to_closing_parenthesis (parser,
						 /*recovering=*/true,
						 /*or_comma=*/false,
						 /*consume_paren=*/true);
	}
    }
  /* ... and the property declaration(s).  */
  properties = cp_parser_objc_struct_declaration (parser);
  if (properties == error_mark_node)
    {
      cp_parser_skip_to_end_of_statement (parser);
      /* If the next token is now a `;', consume it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	cp_lexer_consume_token (parser->lexer);
      return;
    }
  if (properties == NULL_TREE)
    cp_parser_error (parser, "expected identifier");
  else
    {
      /* Comma-separated properties are chained together in
	 reverse order; add them one by one.  */
      properties = nreverse (properties);
      for (; properties; properties = TREE_CHAIN (properties))
	objc_add_property_declaration (loc, copy_node (properties),
				       property_readonly, property_readwrite,
				       property_assign, property_retain,
				       property_copy, property_nonatomic,
				       property_getter_ident, property_setter_ident);
    }
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* Parse an Objective-C++ @synthesize declaration. The syntax is:
objc-synthesize-declaration:
@synthesize objc-synthesize-identifier-list ;
objc-synthesize-identifier-list:
objc-synthesize-identifier
objc-synthesize-identifier-list, objc-synthesize-identifier
objc-synthesize-identifier
identifier
identifier = identifier
For example:
@synthesize MyProperty;
@synthesize OneProperty, AnotherProperty=MyIvar, YetAnotherProperty;
PS: This function is identical to c_parser_objc_at_synthesize_declaration
for C. Keep them in sync.
*/
static void
cp_parser_objc_at_synthesize_declaration (cp_parser *parser)
{
  tree props = NULL_TREE;
  location_t start_loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@synthesize'.  */

  /* Collect the comma-separated "identifier" or "identifier =
     identifier" items into PROPS as TREE_LISTs of (ivar, property).  */
  for (;;)
    {
      tree prop_name, ivar_name;

      prop_name = cp_parser_identifier (parser);
      if (prop_name == error_mark_node)
	{
	  cp_parser_consume_semicolon_at_end_of_statement (parser);
	  return;
	}

      ivar_name = NULL_TREE;
      if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	{
	  /* Skip the '=' and read the instance variable name.  */
	  cp_lexer_consume_token (parser->lexer);
	  ivar_name = cp_parser_identifier (parser);
	  if (ivar_name == error_mark_node)
	    {
	      cp_parser_consume_semicolon_at_end_of_statement (parser);
	      return;
	    }
	}

      props = chainon (props, build_tree_list (ivar_name, prop_name));

      /* A comma means another item follows; anything else ends the
	 list.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }

  cp_parser_consume_semicolon_at_end_of_statement (parser);
  objc_add_synthesize_declaration (start_loc, props);
}
/* Parse an Objective-C++ @dynamic declaration. The syntax is:
objc-dynamic-declaration:
@dynamic identifier-list ;
For example:
@dynamic MyProperty;
@dynamic MyProperty, AnotherProperty;
PS: This function is identical to c_parser_objc_at_dynamic_declaration
for C. Keep them in sync.
*/
static void
cp_parser_objc_at_dynamic_declaration (cp_parser *parser)
{
  tree props = NULL_TREE;
  location_t start_loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@dynamic'.  */

  /* Collect the comma-separated identifiers into PROPS.  */
  for (;;)
    {
      tree prop_name;

      prop_name = cp_parser_identifier (parser);
      if (prop_name == error_mark_node)
	{
	  cp_parser_consume_semicolon_at_end_of_statement (parser);
	  return;
	}

      props = chainon (props, build_tree_list (NULL, prop_name));

      /* A comma introduces another identifier; anything else ends the
	 list.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }

  cp_parser_consume_semicolon_at_end_of_statement (parser);
  objc_add_dynamic_declaration (start_loc, props);
}
/* OpenMP 2.5 parsing routines. */
/* Returns name of the next clause.
If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and
the token is not consumed. Otherwise appropriate pragma_omp_clause is
returned and the token is consumed. */
static pragma_omp_clause
cp_parser_omp_clause_name (cp_parser *parser)
{
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;

  /* 'if', 'default' and 'private' are C++ keywords, so they never
     arrive as CPP_NAME tokens and must be matched explicitly.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_PRIVATE))
    result = PRAGMA_OMP_CLAUSE_PRIVATE;
  else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      /* Dispatch on the first character to keep the strcmp chains
	 short.  */
      switch (p[0])
	{
	case 'c':
	  if (!strcmp ("collapse", p))
	    result = PRAGMA_OMP_CLAUSE_COLLAPSE;
	  else if (!strcmp ("copyin", p))
	    result = PRAGMA_OMP_CLAUSE_COPYIN;
	  else if (!strcmp ("copyprivate", p))
	    result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
	  break;
	case 'f':
	  if (!strcmp ("final", p))
	    result = PRAGMA_OMP_CLAUSE_FINAL;
	  else if (!strcmp ("firstprivate", p))
	    result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE;
	  break;
	case 'l':
	  if (!strcmp ("lastprivate", p))
	    result = PRAGMA_OMP_CLAUSE_LASTPRIVATE;
	  break;
	case 'm':
	  if (!strcmp ("mergeable", p))
	    result = PRAGMA_OMP_CLAUSE_MERGEABLE;
	  break;
	case 'n':
	  if (!strcmp ("nowait", p))
	    result = PRAGMA_OMP_CLAUSE_NOWAIT;
	  else if (!strcmp ("num_threads", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_THREADS;
	  break;
	case 'o':
	  if (!strcmp ("ordered", p))
	    result = PRAGMA_OMP_CLAUSE_ORDERED;
	  break;
	case 'r':
	  if (!strcmp ("reduction", p))
	    result = PRAGMA_OMP_CLAUSE_REDUCTION;
	  break;
	case 's':
	  if (!strcmp ("schedule", p))
	    result = PRAGMA_OMP_CLAUSE_SCHEDULE;
	  else if (!strcmp ("shared", p))
	    result = PRAGMA_OMP_CLAUSE_SHARED;
	  break;
	case 'u':
	  if (!strcmp ("untied", p))
	    result = PRAGMA_OMP_CLAUSE_UNTIED;
	  break;
	}
    }

  /* Consume the token only when a clause name was recognized, as the
     function comment above promises.  */
  if (result != PRAGMA_OMP_CLAUSE_NONE)
    cp_lexer_consume_token (parser->lexer);

  return result;
}
/* Validate that a clause of the given type does not already exist. */
static void
check_no_duplicate_clause (tree clauses, enum omp_clause_code code,
			   const char *name, location_t location)
{
  tree cl = clauses;

  /* Walk the clause chain; diagnose at most one duplicate of CODE.  */
  while (cl != NULL_TREE)
    {
      if (OMP_CLAUSE_CODE (cl) == code)
	{
	  error_at (location, "too many %qs clauses", name);
	  return;
	}
      cl = OMP_CLAUSE_CHAIN (cl);
    }
}
/* OpenMP 2.5:
variable-list:
identifier
variable-list , identifier
In addition, we match a closing parenthesis. An opening parenthesis
will have been consumed by the caller.
If KIND is nonzero, create the appropriate node and install the decl
in OMP_CLAUSE_DECL and add the node to the head of the list.
If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE;
return the list created. */
static tree
cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
				tree list)
{
  cp_token *token;
  while (1)
    {
      tree name, decl;

      token = cp_lexer_peek_token (parser->lexer);
      name = cp_parser_id_expression (parser, /*template_p=*/false,
				      /*check_dependency_p=*/true,
				      /*template_p=*/NULL,
				      /*declarator_p=*/false,
				      /*optional_p=*/false);
      if (name == error_mark_node)
	/* Jump into the recovery code at the bottom of the function;
	   it may jump back to get_comma below to resume the list.  */
	goto skip_comma;

      decl = cp_parser_lookup_name_simple (parser, name, token->location);
      if (decl == error_mark_node)
	cp_parser_name_lookup_error (parser, name, decl, NLE_NULL,
				     token->location);
      else if (kind != 0)
	{
	  /* Prepend a new clause of kind KIND holding DECL.  */
	  tree u = build_omp_clause (token->location, kind);
	  OMP_CLAUSE_DECL (u) = decl;
	  OMP_CLAUSE_CHAIN (u) = list;
	  list = u;
	}
      else
	/* KIND == 0: collect the decls as a plain TREE_LIST instead.  */
	list = tree_cons (decl, NULL_TREE, list);

    get_comma:
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      int ending;

      /* Try to resync to an unnested comma.  Copied from
	 cp_parser_parenthesized_expression_list.  */
    skip_comma:
      ending = cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/true,
						      /*consume_paren=*/true);
      /* A negative result means we stopped at a comma rather than the
	 closing paren, so go back and parse the next list item.  */
      if (ending < 0)
	goto get_comma;
    }

  return list;
}
/* Similarly, but expect leading and trailing parenthesis. This is a very
common case for omp clauses. */
static tree
cp_parser_omp_var_list (cp_parser *parser, enum omp_clause_code kind, tree list)
{
  /* Require the opening '('; the matching ')' is consumed by
     cp_parser_omp_var_list_no_open.  */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  return cp_parser_omp_var_list_no_open (parser, kind, list);
}
/* OpenMP 3.0:
collapse ( constant-expression ) */
static tree
cp_parser_omp_clause_collapse (cp_parser *parser, tree list, location_t location)
{
  tree c, num;
  location_t loc;
  HOST_WIDE_INT n;

  loc = cp_lexer_peek_token (parser->lexer)->location;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  num = cp_parser_constant_expression (parser, false, NULL);
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  if (num == error_mark_node)
    return list;
  num = fold_non_dependent_expr (num);
  /* The argument must be an integral constant that is positive and
     also representable in an 'int'.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (num))
      || !host_integerp (num, 0)
      || (n = tree_low_cst (num, 0)) <= 0
      || (int) n != n)
    {
      error_at (loc, "collapse argument needs positive constant integer expression");
      return list;
    }
  check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse", location);
  c = build_omp_clause (loc, OMP_CLAUSE_COLLAPSE);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_COLLAPSE_EXPR (c) = num;
  return c;
}
/* OpenMP 2.5:
default ( shared | none ) */
static tree
cp_parser_omp_clause_default (cp_parser *parser, tree list, location_t location)
{
  enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
  tree c;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      switch (p[0])
	{
	case 'n':
	  if (strcmp ("none", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_NONE;
	  break;
	case 's':
	  if (strcmp ("shared", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_SHARED;
	  break;
	default:
	  goto invalid_kind;
	}
      cp_lexer_consume_token (parser->lexer);
    }
  else
    {
      /* Also reached by goto from the CPP_NAME branch above when the
	 identifier is neither "none" nor "shared".  */
    invalid_kind:
      cp_parser_error (parser, "expected %<none%> or %<shared%>");
    }
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  /* On error a diagnostic has been emitted; leave the list unchanged.  */
  if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    return list;
  check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default", location);
  c = build_omp_clause (location, OMP_CLAUSE_DEFAULT);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_DEFAULT_KIND (c) = kind;
  return c;
}
/* OpenMP 3.1:
final ( expression ) */
static tree
cp_parser_omp_clause_final (cp_parser *parser, tree list, location_t location)
{
  tree expr, clause;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  /* Parse the controlling expression; on error, or when the closing
     ')' is missing, resynchronize past the parenthesized group.  */
  expr = cp_parser_condition (parser);
  if (expr == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  check_no_duplicate_clause (list, OMP_CLAUSE_FINAL, "final", location);

  clause = build_omp_clause (location, OMP_CLAUSE_FINAL);
  OMP_CLAUSE_CHAIN (clause) = list;
  OMP_CLAUSE_FINAL_EXPR (clause) = expr;
  return clause;
}
/* OpenMP 2.5:
if ( expression ) */
static tree
cp_parser_omp_clause_if (cp_parser *parser, tree list, location_t location)
{
  tree cond, clause;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  /* Parse the condition; on error, or when the closing ')' is
     missing, resynchronize past the parenthesized group.  */
  cond = cp_parser_condition (parser);
  if (cond == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if", location);

  clause = build_omp_clause (location, OMP_CLAUSE_IF);
  OMP_CLAUSE_CHAIN (clause) = list;
  OMP_CLAUSE_IF_EXPR (clause) = cond;
  return clause;
}
/* OpenMP 3.1:
mergeable */
static tree
cp_parser_omp_clause_mergeable (cp_parser *parser ATTRIBUTE_UNUSED,
				tree list, location_t location)
{
  tree clause;

  /* 'mergeable' takes no arguments: guard against duplicates, then
     push a bare clause onto the list.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_MERGEABLE, "mergeable",
			     location);

  clause = build_omp_clause (location, OMP_CLAUSE_MERGEABLE);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
nowait */
static tree
cp_parser_omp_clause_nowait (cp_parser *parser ATTRIBUTE_UNUSED,
			     tree list, location_t location)
{
  tree clause;

  /* 'nowait' takes no arguments: guard against duplicates, then push
     a bare clause onto the list.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait", location);

  clause = build_omp_clause (location, OMP_CLAUSE_NOWAIT);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
num_threads ( expression ) */
static tree
cp_parser_omp_clause_num_threads (cp_parser *parser, tree list,
				  location_t location)
{
  tree num, clause;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  /* Parse the thread-count expression; on error, or when the closing
     ')' is missing, resynchronize past the parenthesized group.  */
  num = cp_parser_expression (parser, false, NULL);
  if (num == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS,
			     "num_threads", location);

  clause = build_omp_clause (location, OMP_CLAUSE_NUM_THREADS);
  OMP_CLAUSE_CHAIN (clause) = list;
  OMP_CLAUSE_NUM_THREADS_EXPR (clause) = num;
  return clause;
}
/* OpenMP 2.5:
ordered */
static tree
cp_parser_omp_clause_ordered (cp_parser *parser ATTRIBUTE_UNUSED,
			      tree list, location_t location)
{
  tree clause;

  /* 'ordered' takes no arguments: guard against duplicates, then push
     a bare clause onto the list.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED,
			     "ordered", location);

  clause = build_omp_clause (location, OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
reduction ( reduction-operator : variable-list )
reduction-operator:
One of: + * - & ^ | && ||
OpenMP 3.1:
reduction-operator:
One of: + * - & ^ | && || min max */
static tree
cp_parser_omp_clause_reduction (cp_parser *parser, tree list)
{
  enum tree_code code;
  tree nlist, c;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  /* Map the reduction-operator token to the corresponding tree code.  */
  switch (cp_lexer_peek_token (parser->lexer)->type)
    {
    case CPP_PLUS:
      code = PLUS_EXPR;
      break;
    case CPP_MULT:
      code = MULT_EXPR;
      break;
    case CPP_MINUS:
      code = MINUS_EXPR;
      break;
    case CPP_AND:
      code = BIT_AND_EXPR;
      break;
    case CPP_XOR:
      code = BIT_XOR_EXPR;
      break;
    case CPP_OR:
      code = BIT_IOR_EXPR;
      break;
    case CPP_AND_AND:
      code = TRUTH_ANDIF_EXPR;
      break;
    case CPP_OR_OR:
      code = TRUTH_ORIF_EXPR;
      break;
    case CPP_NAME:
      {
	/* OpenMP 3.1 adds min and max, which arrive as plain
	   identifiers rather than punctuation.  */
	tree id = cp_lexer_peek_token (parser->lexer)->u.value;
	const char *p = IDENTIFIER_POINTER (id);
	if (strcmp (p, "min") == 0)
	  {
	    code = MIN_EXPR;
	    break;
	  }
	if (strcmp (p, "max") == 0)
	  {
	    code = MAX_EXPR;
	    break;
	  }
      }
      /* FALLTHROUGH */
    default:
      cp_parser_error (parser, "expected %<+%>, %<*%>, %<-%>, %<&%>, %<^%>, "
			       "%<|%>, %<&&%>, %<||%>, %<min%> or %<max%>");
    resync_fail:
      cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					     /*or_comma=*/false,
					     /*consume_paren=*/true);
      return list;
    }
  cp_lexer_consume_token (parser->lexer);
  if (!cp_parser_require (parser, CPP_COLON, RT_COLON))
    goto resync_fail;
  nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_REDUCTION, list);
  /* The newly parsed variables are the clauses on NLIST up to (but
     not including) the old head LIST; stamp the reduction code on
     each of them.  */
  for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_REDUCTION_CODE (c) = code;
  return nlist;
}
/* OpenMP 2.5:
schedule ( schedule-kind )
schedule ( schedule-kind , expression )
schedule-kind:
static | dynamic | guided | runtime | auto */
static tree
cp_parser_omp_clause_schedule (cp_parser *parser, tree list, location_t location)
{
  tree c, t;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  c = build_omp_clause (location, OMP_CLAUSE_SCHEDULE);

  /* 'static' and 'auto' are C++ keywords and are matched below as
     keywords; the other schedule kinds arrive as CPP_NAME.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      switch (p[0])
	{
	case 'd':
	  if (strcmp ("dynamic", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
	  break;
	case 'g':
	  if (strcmp ("guided", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
	  break;
	case 'r':
	  if (strcmp ("runtime", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
	  break;
	default:
	  goto invalid_kind;
	}
    }
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AUTO))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
  else
    goto invalid_kind;
  cp_lexer_consume_token (parser->lexer);

  /* Optional ", chunk_size" -- rejected for the runtime and auto
     kinds.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
    {
      cp_token *token;
      cp_lexer_consume_token (parser->lexer);

      token = cp_lexer_peek_token (parser->lexer);
      t = cp_parser_assignment_expression (parser, false, NULL);

      if (t == error_mark_node)
	goto resync_fail;
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
	error_at (token->location, "schedule %<runtime%> does not take "
		  "a %<chunk_size%> parameter");
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO)
	error_at (token->location, "schedule %<auto%> does not take "
		  "a %<chunk_size%> parameter");
      else
	OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;

      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	goto resync_fail;
    }
  else if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_COMMA_CLOSE_PAREN))
    goto resync_fail;

  check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule", location);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;

 invalid_kind:
  cp_parser_error (parser, "invalid schedule kind");
 resync_fail:
  cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					 /*or_comma=*/false,
					 /*consume_paren=*/true);
  return list;
}
/* OpenMP 3.0:
untied */
static tree
cp_parser_omp_clause_untied (cp_parser *parser ATTRIBUTE_UNUSED,
			     tree list, location_t location)
{
  tree clause;

  /* 'untied' takes no arguments: guard against duplicates, then push
     a bare clause onto the list.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied", location);

  clause = build_omp_clause (location, OMP_CLAUSE_UNTIED);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* Parse all OpenMP clauses.  The set of clauses allowed by the
   directive is a bitmask in MASK, indexed by pragma_omp_clause.
   WHERE names the directive for diagnostics; PRAGMA_TOK is the
   pragma token, used to skip to the end of the pragma line on error.
   Return the finished list of clauses.  */
static tree
cp_parser_omp_all_clauses (cp_parser *parser, unsigned int mask,
			   const char *where, cp_token *pragma_tok)
{
  tree clauses = NULL;
  bool first = true;
  cp_token *token = NULL;
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL))
    {
      pragma_omp_clause c_kind;
      const char *c_name;
      tree prev = clauses;
      /* Clauses after the first may optionally be separated by a
	 comma.  */
      if (!first && cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
      token = cp_lexer_peek_token (parser->lexer);
      c_kind = cp_parser_omp_clause_name (parser);
      first = false;
      switch (c_kind)
	{
	case PRAGMA_OMP_CLAUSE_COLLAPSE:
	  clauses = cp_parser_omp_clause_collapse (parser, clauses,
						   token->location);
	  c_name = "collapse";
	  break;
	case PRAGMA_OMP_CLAUSE_COPYIN:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYIN, clauses);
	  c_name = "copyin";
	  break;
	case PRAGMA_OMP_CLAUSE_COPYPRIVATE:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYPRIVATE,
					    clauses);
	  c_name = "copyprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_DEFAULT:
	  clauses = cp_parser_omp_clause_default (parser, clauses,
						  token->location);
	  c_name = "default";
	  break;
	case PRAGMA_OMP_CLAUSE_FINAL:
	  clauses = cp_parser_omp_clause_final (parser, clauses, token->location);
	  c_name = "final";
	  break;
	case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_FIRSTPRIVATE,
					    clauses);
	  c_name = "firstprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_IF:
	  clauses = cp_parser_omp_clause_if (parser, clauses, token->location);
	  c_name = "if";
	  break;
	case PRAGMA_OMP_CLAUSE_LASTPRIVATE:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_LASTPRIVATE,
					    clauses);
	  c_name = "lastprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_MERGEABLE:
	  clauses = cp_parser_omp_clause_mergeable (parser, clauses,
						    token->location);
	  c_name = "mergeable";
	  break;
	case PRAGMA_OMP_CLAUSE_NOWAIT:
	  clauses = cp_parser_omp_clause_nowait (parser, clauses, token->location);
	  c_name = "nowait";
	  break;
	case PRAGMA_OMP_CLAUSE_NUM_THREADS:
	  clauses = cp_parser_omp_clause_num_threads (parser, clauses,
						      token->location);
	  c_name = "num_threads";
	  break;
	case PRAGMA_OMP_CLAUSE_ORDERED:
	  clauses = cp_parser_omp_clause_ordered (parser, clauses,
						  token->location);
	  c_name = "ordered";
	  break;
	case PRAGMA_OMP_CLAUSE_PRIVATE:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_PRIVATE,
					    clauses);
	  c_name = "private";
	  break;
	case PRAGMA_OMP_CLAUSE_REDUCTION:
	  clauses = cp_parser_omp_clause_reduction (parser, clauses);
	  c_name = "reduction";
	  break;
	case PRAGMA_OMP_CLAUSE_SCHEDULE:
	  clauses = cp_parser_omp_clause_schedule (parser, clauses,
						   token->location);
	  c_name = "schedule";
	  break;
	case PRAGMA_OMP_CLAUSE_SHARED:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_SHARED,
					    clauses);
	  c_name = "shared";
	  break;
	case PRAGMA_OMP_CLAUSE_UNTIED:
	  clauses = cp_parser_omp_clause_untied (parser, clauses,
						 token->location);
	  /* Fixed: this previously said "nowait", which made the
	     "%qs is not valid for %qs" diagnostic below name the
	     wrong clause.  */
	  c_name = "untied";
	  break;
	default:
	  cp_parser_error (parser, "expected %<#pragma omp%> clause");
	  goto saw_error;
	}
      /* Reject clauses that the directive's MASK does not allow.  */
      if (((mask >> c_kind) & 1) == 0)
	{
	  /* Remove the invalid clause(s) from the list to avoid
	     confusing the rest of the compiler.  */
	  clauses = prev;
	  error_at (token->location, "%qs is not valid for %qs", c_name, where);
	}
    }
 saw_error:
  cp_parser_skip_to_pragma_eol (parser, pragma_tok);
  return finish_omp_clauses (clauses);
}
/* OpenMP 2.5:
structured-block:
statement
In practice, we're also interested in adding the statement to an
outer node. So it is convenient if we work around the fact that
cp_parser_statement calls add_stmt. */
static unsigned
cp_parser_begin_omp_structured_block (cp_parser *parser)
{
  unsigned old_in_statement = parser->in_statement;

  /* Only move the value to IN_OMP_BLOCK if it wasn't false.  This
     preserves the "not within loop or switch" style error messages
     for nonsense cases like
       void foo() {
       #pragma omp single
         break;
       }
  */
  if (parser->in_statement != 0)
    parser->in_statement = IN_OMP_BLOCK;

  return old_in_statement;
}
static void
cp_parser_end_omp_structured_block (cp_parser *parser, unsigned save)
{
  /* Restore the state captured by cp_parser_begin_omp_structured_block.  */
  parser->in_statement = save;
}
static tree
cp_parser_omp_structured_block (cp_parser *parser)
{
  tree block = begin_omp_structured_block ();
  unsigned int save_in_statement = cp_parser_begin_omp_structured_block (parser);

  /* Parse exactly one statement as the body of the construct.  */
  cp_parser_statement (parser, NULL_TREE, false, NULL);

  cp_parser_end_omp_structured_block (parser, save_in_statement);
  return finish_omp_structured_block (block);
}
/* OpenMP 2.5:
# pragma omp atomic new-line
expression-stmt
expression-stmt:
x binop= expr | x++ | ++x | x-- | --x
binop:
+, *, -, /, &, ^, |, <<, >>
where x is an lvalue expression with scalar type.
OpenMP 3.1:
# pragma omp atomic new-line
update-stmt
# pragma omp atomic read new-line
read-stmt
# pragma omp atomic write new-line
write-stmt
# pragma omp atomic update new-line
update-stmt
# pragma omp atomic capture new-line
capture-stmt
# pragma omp atomic capture new-line
capture-block
read-stmt:
v = x
write-stmt:
x = expr
update-stmt:
expression-stmt | x = x binop expr
capture-stmt:
v = x binop= expr | v = x++ | v = ++x | v = x-- | v = --x
capture-block:
{ v = x; update-stmt; } | { update-stmt; v = x; }
where x and v are lvalue expressions with scalar type. */
static void
cp_parser_omp_atomic (cp_parser *parser, cp_token *pragma_tok)
{
  tree lhs = NULL_TREE, rhs = NULL_TREE, v = NULL_TREE, lhs1 = NULL_TREE;
  tree rhs1 = NULL_TREE, orig_lhs;
  enum tree_code code = OMP_ATOMIC, opcode = NOP_EXPR;
  bool structured_block = false;

  /* Parse the optional OpenMP 3.1 sub-clause.  NOP_EXPR stands in for
     "atomic write" until the operands have been parsed.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      if (!strcmp (p, "read"))
	code = OMP_ATOMIC_READ;
      else if (!strcmp (p, "write"))
	code = NOP_EXPR;
      else if (!strcmp (p, "update"))
	code = OMP_ATOMIC;
      else if (!strcmp (p, "capture"))
	code = OMP_ATOMIC_CAPTURE_NEW;
      else
	p = NULL;
      if (p)
	cp_lexer_consume_token (parser->lexer);
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);

  switch (code)
    {
    case OMP_ATOMIC_READ:
    case NOP_EXPR: /* atomic write */
      /* Both forms start "v = ..."; a read takes a unary lhs while a
	 write allows a full expression on the right-hand side.  */
      v = cp_parser_unary_expression (parser, /*address_p=*/false,
				      /*cast_p=*/false, NULL);
      if (v == error_mark_node)
	goto saw_error;
      if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
	goto saw_error;
      if (code == NOP_EXPR)
	lhs = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      else
	lhs = cp_parser_unary_expression (parser, /*address_p=*/false,
					  /*cast_p=*/false, NULL);
      if (lhs == error_mark_node)
	goto saw_error;
      if (code == NOP_EXPR)
	{
	  /* atomic write is represented by OMP_ATOMIC with NOP_EXPR
	     opcode.  */
	  code = OMP_ATOMIC;
	  rhs = lhs;
	  lhs = v;
	  v = NULL_TREE;
	}
      goto done;
    case OMP_ATOMIC_CAPTURE_NEW:
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	{
	  /* Capture-block form: { v = x; update; } or
	     { update; v = x; }.  */
	  cp_lexer_consume_token (parser->lexer);
	  structured_block = true;
	}
      else
	{
	  /* Capture-statement form: starts with "v = ...".  */
	  v = cp_parser_unary_expression (parser, /*address_p=*/false,
					  /*cast_p=*/false, NULL);
	  if (v == error_mark_node)
	    goto saw_error;
	  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
	    goto saw_error;
	}
    default:
      break;
    }

restart:
  lhs = cp_parser_unary_expression (parser, /*address_p=*/false,
				    /*cast_p=*/false, NULL);
  orig_lhs = lhs;
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
      goto saw_error;
    case POSTINCREMENT_EXPR:
      /* In the capture-statement form a post-increment yields the
	 old value.  */
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREINCREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      opcode = PLUS_EXPR;
      rhs = integer_one_node;
      break;
    case POSTDECREMENT_EXPR:
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREDECREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      opcode = MINUS_EXPR;
      rhs = integer_one_node;
      break;
    case COMPOUND_EXPR:
      if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR
	  && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR
	  && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) == MODIFY_EXPR
	  && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0)
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND
					 (TREE_OPERAND (lhs, 1), 0), 0)))
	     == BOOLEAN_TYPE)
	/* Undo effects of boolean_increment for post {in,de}crement.  */
	lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0);
      /* FALLTHRU */
    case MODIFY_EXPR:
      if (TREE_CODE (lhs) == MODIFY_EXPR
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE)
	{
	  /* Undo effects of boolean_increment.  */
	  if (integer_onep (TREE_OPERAND (lhs, 1)))
	    {
	      /* This is pre or post increment.  */
	      rhs = TREE_OPERAND (lhs, 1);
	      lhs = TREE_OPERAND (lhs, 0);
	      opcode = NOP_EXPR;
	      if (code == OMP_ATOMIC_CAPTURE_NEW
		  && !structured_block
		  && TREE_CODE (orig_lhs) == COMPOUND_EXPR)
		code = OMP_ATOMIC_CAPTURE_OLD;
	      break;
	    }
	}
      /* FALLTHRU */
    default:
      /* Expect "x <op>= expr", or (in update/capture forms)
	 "x = x binop expr" / "x = expr binop x".  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
	{
	case CPP_MULT_EQ:
	  opcode = MULT_EXPR;
	  break;
	case CPP_DIV_EQ:
	  opcode = TRUNC_DIV_EXPR;
	  break;
	case CPP_PLUS_EQ:
	  opcode = PLUS_EXPR;
	  break;
	case CPP_MINUS_EQ:
	  opcode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT_EQ:
	  opcode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT_EQ:
	  opcode = RSHIFT_EXPR;
	  break;
	case CPP_AND_EQ:
	  opcode = BIT_AND_EXPR;
	  break;
	case CPP_OR_EQ:
	  opcode = BIT_IOR_EXPR;
	  break;
	case CPP_XOR_EQ:
	  opcode = BIT_XOR_EXPR;
	  break;
	case CPP_EQ:
	  if (structured_block || code == OMP_ATOMIC)
	    {
	      enum cp_parser_prec oprec;
	      cp_token *token;
	      cp_lexer_consume_token (parser->lexer);
	      rhs1 = cp_parser_unary_expression (parser, /*address_p=*/false,
						 /*cast_p=*/false, NULL);
	      if (rhs1 == error_mark_node)
		goto saw_error;
	      token = cp_lexer_peek_token (parser->lexer);
	      switch (token->type)
		{
		case CPP_SEMICOLON:
		  if (code == OMP_ATOMIC_CAPTURE_NEW)
		    {
		      /* We just read "v = x;", the first half of a
			 capture block; restart to parse the update
			 statement that must follow.  */
		      code = OMP_ATOMIC_CAPTURE_OLD;
		      v = lhs;
		      lhs = NULL_TREE;
		      lhs1 = rhs1;
		      rhs1 = NULL_TREE;
		      cp_lexer_consume_token (parser->lexer);
		      goto restart;
		    }
		  cp_parser_error (parser,
				   "invalid form of %<#pragma omp atomic%>");
		  goto saw_error;
		case CPP_MULT:
		  opcode = MULT_EXPR;
		  break;
		case CPP_DIV:
		  opcode = TRUNC_DIV_EXPR;
		  break;
		case CPP_PLUS:
		  opcode = PLUS_EXPR;
		  break;
		case CPP_MINUS:
		  opcode = MINUS_EXPR;
		  break;
		case CPP_LSHIFT:
		  opcode = LSHIFT_EXPR;
		  break;
		case CPP_RSHIFT:
		  opcode = RSHIFT_EXPR;
		  break;
		case CPP_AND:
		  opcode = BIT_AND_EXPR;
		  break;
		case CPP_OR:
		  opcode = BIT_IOR_EXPR;
		  break;
		case CPP_XOR:
		  opcode = BIT_XOR_EXPR;
		  break;
		default:
		  cp_parser_error (parser,
				   "invalid operator for %<#pragma omp atomic%>");
		  goto saw_error;
		}
	      /* For commutative opcodes, parse the rest of the rhs at
		 one precedence level below the operator so that
		 "x = expr binop x" also parses as a single binop.  */
	      oprec = TOKEN_PRECEDENCE (token);
	      gcc_assert (oprec != PREC_NOT_OPERATOR);
	      if (commutative_tree_code (opcode))
		oprec = (enum cp_parser_prec) (oprec - 1);
	      cp_lexer_consume_token (parser->lexer);
	      rhs = cp_parser_binary_expression (parser, false, false,
						 oprec, NULL);
	      if (rhs == error_mark_node)
		goto saw_error;
	      goto stmt_done;
	    }
	  /* FALLTHROUGH */
	default:
	  cp_parser_error (parser,
			   "invalid operator for %<#pragma omp atomic%>");
	  goto saw_error;
	}
      /* Consume the compound-assignment operator and parse its rhs.  */
      cp_lexer_consume_token (parser->lexer);

      rhs = cp_parser_expression (parser, false, NULL);
      if (rhs == error_mark_node)
	goto saw_error;
      break;
    }
stmt_done:
  /* In the capture-block form where the update came first, the
     trailing "v = x;" still needs to be parsed.  */
  if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
    {
      if (!cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON))
	goto saw_error;
      v = cp_parser_unary_expression (parser, /*address_p=*/false,
				      /*cast_p=*/false, NULL);
      if (v == error_mark_node)
	goto saw_error;
      if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
	goto saw_error;
      lhs1 = cp_parser_unary_expression (parser, /*address_p=*/false,
					 /*cast_p=*/false, NULL);
      if (lhs1 == error_mark_node)
	goto saw_error;
    }
  if (structured_block)
    {
      cp_parser_consume_semicolon_at_end_of_statement (parser);
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
done:
  finish_omp_atomic (code, opcode, lhs, rhs, v, lhs1, rhs1);
  if (!structured_block)
    cp_parser_consume_semicolon_at_end_of_statement (parser);
  return;

saw_error:
  /* Error recovery: skip the statement, plus the enclosing braces of
     a capture block when present.  */
  cp_parser_skip_to_end_of_block_or_statement (parser);
  if (structured_block)
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	cp_lexer_consume_token (parser->lexer);
      else if (code == OMP_ATOMIC_CAPTURE_NEW)
	{
	  cp_parser_skip_to_end_of_block_or_statement (parser);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	    cp_lexer_consume_token (parser->lexer);
	}
    }
}
/* OpenMP 2.5:
# pragma omp barrier new-line */
static void
cp_parser_omp_barrier (cp_parser *parser, cp_token *pragma_tok)
{
  /* The barrier directive has no clauses: require the end of the
     pragma line, then emit the barrier.  */
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_barrier ();
}
/* OpenMP 2.5:
# pragma omp critical [(name)] new-line
structured-block */
static tree
cp_parser_omp_critical (cp_parser *parser, cp_token *pragma_tok)
{
  tree body, name;

  /* Parse the optional "( name )".  */
  name = NULL;
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);
      name = cp_parser_identifier (parser);

      if (name == error_mark_node
	  || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					       /*or_comma=*/false,
					       /*consume_paren=*/true);
      /* Treat an unparsable name as an unnamed critical section.  */
      if (name == error_mark_node)
	name = NULL;
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);

  body = cp_parser_omp_structured_block (parser);
  return c_finish_omp_critical (input_location, body, name);
}
/* OpenMP 2.5:
# pragma omp flush flush-vars[opt] new-line
flush-vars:
( variable-list ) */
static void
cp_parser_omp_flush (cp_parser *parser, cp_token *pragma_tok)
{
  /* An optional parenthesized variable list may follow; it is parsed
     and its result discarded.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    (void) cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL);
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_flush ();
}
/* Helper function, to parse the omp for condition expression.  (The
   old comment said "increment expression" -- a copy-paste from
   cp_parser_omp_for_incr below.)  */
static tree
cp_parser_omp_for_cond (cp_parser *parser, tree decl)
{
  tree cond = cp_parser_binary_expression (parser, false, true,
					   PREC_NOT_OPERATOR, NULL);
  /* The condition must be followed directly by the terminating ';';
     otherwise skip to the end of the statement.  */
  if (cond == error_mark_node
      || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }

  /* Only relational comparisons are accepted as the loop condition.  */
  switch (TREE_CODE (cond))
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      break;
    default:
      return error_mark_node;
    }

  /* If decl is an iterator, preserve LHS and RHS of the relational
     expr until finish_omp_for.  */
  if (decl
      && (type_dependent_expression_p (decl)
	  || CLASS_TYPE_P (TREE_TYPE (decl))))
    return cond;

  return build_x_binary_op (TREE_CODE (cond),
			    TREE_OPERAND (cond, 0), ERROR_MARK,
			    TREE_OPERAND (cond, 1), ERROR_MARK,
			    /*overload=*/NULL, tf_warning_or_error);
}
/* Helper function, to parse omp for increment expression.  */
static tree
cp_parser_omp_for_incr (cp_parser *parser, tree decl)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  enum tree_code op;
  tree lhs, rhs;
  cp_id_kind idk;
  bool decl_first;

  /* ++decl or --decl.  */
  if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS)
    {
      op = (token->type == CPP_PLUS_PLUS
	    ? PREINCREMENT_EXPR : PREDECREMENT_EXPR);
      cp_lexer_consume_token (parser->lexer);
      lhs = cp_parser_cast_expression (parser, false, false, NULL);
      if (lhs != decl)
	return error_mark_node;
      return build2 (op, TREE_TYPE (decl), decl, NULL_TREE);
    }

  /* All remaining forms begin with the iteration variable itself.  */
  lhs = cp_parser_primary_expression (parser, false, false, false, &idk);
  if (lhs != decl)
    return error_mark_node;

  /* decl++ or decl--.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS)
    {
      op = (token->type == CPP_PLUS_PLUS
	    ? POSTINCREMENT_EXPR : POSTDECREMENT_EXPR);
      cp_lexer_consume_token (parser->lexer);
      return build2 (op, TREE_TYPE (decl), decl, NULL_TREE);
    }

  op = cp_parser_assignment_operator_opt (parser);
  if (op == ERROR_MARK)
    return error_mark_node;

  /* decl OP= rhs (compound assignment).  */
  if (op != NOP_EXPR)
    {
      rhs = cp_parser_assignment_expression (parser, false, NULL);
      rhs = build2 (op, TREE_TYPE (decl), decl, rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs);
    }

  /* decl = additive-expression.  DECL itself must appear either as
     the first or as the last operand of the chain.  */
  lhs = cp_parser_binary_expression (parser, false, false,
				     PREC_ADDITIVE_EXPRESSION, NULL);
  token = cp_lexer_peek_token (parser->lexer);
  decl_first = lhs == decl;
  /* If DECL came first, drop it from the accumulated expression; it
     is re-added below as "decl + lhs".  */
  if (decl_first)
    lhs = NULL_TREE;
  if (token->type != CPP_PLUS
      && token->type != CPP_MINUS)
    return error_mark_node;

  /* Fold the remaining '+'/'-' operands into LHS; when DECL is not
     first, the final operand (kept out of LHS) must be DECL.  */
  do
    {
      op = token->type == CPP_PLUS ? PLUS_EXPR : MINUS_EXPR;
      cp_lexer_consume_token (parser->lexer);
      rhs = cp_parser_binary_expression (parser, false, false,
					 PREC_ADDITIVE_EXPRESSION, NULL);
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_PLUS || token->type == CPP_MINUS || decl_first)
	{
	  if (lhs == NULL_TREE)
	    {
	      if (op == PLUS_EXPR)
		lhs = rhs;
	      else
		lhs = build_x_unary_op (NEGATE_EXPR, rhs, tf_warning_or_error);
	    }
	  else
	    lhs = build_x_binary_op (op, lhs, ERROR_MARK, rhs, ERROR_MARK,
				     NULL, tf_warning_or_error);
	}
    }
  while (token->type == CPP_PLUS || token->type == CPP_MINUS);

  if (!decl_first)
    {
      /* "decl = expr + decl": DECL must be the very last operand and
	 may not be subtracted.  */
      if (rhs != decl || op == MINUS_EXPR)
	return error_mark_node;
      rhs = build2 (op, TREE_TYPE (decl), lhs, decl);
    }
  else
    rhs = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, lhs);

  return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs);
}
/* Parse the restricted form of the for statement allowed by OpenMP.
   CLAUSES are the clauses already parsed for the directive; PAR_CLAUSES,
   when non-NULL, points at the clause chain of an enclosing combined
   "parallel for", so iteration-variable clauses can be moved/adjusted
   between the two constructs.  Returns the OMP_FOR tree, or NULL_TREE
   on a parse error.  */
static tree
cp_parser_omp_for_loop (cp_parser *parser, tree clauses, tree *par_clauses)
{
tree init, cond, incr, body, decl, pre_body = NULL_TREE, ret;
tree real_decl, initv, condv, incrv, declv;
tree this_pre_body, cl;
location_t loc_first;
bool collapse_err = false;
int i, collapse = 1, nbraces = 0;
VEC(tree,gc) *for_block = make_tree_vector ();
/* A collapse(N) clause means N perfectly nested for loops follow;
   default is a single loop.  */
for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0);
gcc_assert (collapse >= 1);
/* One TREE_VEC slot per collapsed loop for the iteration variable and
   its init/cond/incr expressions; finish_omp_for consumes these.  */
declv = make_tree_vec (collapse);
initv = make_tree_vec (collapse);
condv = make_tree_vec (collapse);
incrv = make_tree_vec (collapse);
loc_first = cp_lexer_peek_token (parser->lexer)->location;
/* Parse each of the COLLAPSE nested for statements.  */
for (i = 0; i < collapse; i++)
{
int bracecount = 0;
bool add_private_clause = false;
location_t loc;
if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
{
cp_parser_error (parser, "for statement expected");
return NULL;
}
loc = cp_lexer_consume_token (parser->lexer)->location;
if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
return NULL;
init = decl = real_decl = NULL;
/* Statements emitted while parsing the init-expr (e.g. class-type
   iterator construction) are collected here and hoisted before the
   OMP_FOR.  */
this_pre_body = push_stmt_list ();
if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
{
/* See 2.5.1 (in OpenMP 3.0, similar wording is in 2.5 standard too):
init-expr:
var = lb
integer-type var = lb
random-access-iterator-type var = lb
pointer-type var = lb
*/
cp_decl_specifier_seq type_specifiers;
/* First, try to parse as an initialized declaration. See
cp_parser_condition, from whence the bulk of this is copied. */
cp_parser_parse_tentatively (parser);
cp_parser_type_specifier_seq (parser, /*is_declaration=*/true,
/*is_trailing_return=*/false,
&type_specifiers);
if (cp_parser_parse_definitely (parser))
{
/* If parsing a type specifier seq succeeded, then this
MUST be a initialized declaration. */
tree asm_specification, attributes;
cp_declarator *declarator;
declarator = cp_parser_declarator (parser,
CP_PARSER_DECLARATOR_NAMED,
/*ctor_dtor_or_conv_p=*/NULL,
/*parenthesized_p=*/NULL,
/*member_p=*/false);
attributes = cp_parser_attributes_opt (parser);
asm_specification = cp_parser_asm_specification_opt (parser);
if (declarator == cp_error_declarator)
cp_parser_skip_to_end_of_statement (parser);
else
{
tree pushed_scope, auto_node;
decl = start_decl (declarator, &type_specifiers,
SD_INITIALIZED, attributes,
/*prefix_attributes=*/NULL_TREE,
&pushed_scope);
auto_node = type_uses_auto (TREE_TYPE (decl));
if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ))
{
/* OpenMP requires "var = lb"; reject "var (lb)" and
anything else that is not an '=' initializer.  */
if (cp_lexer_next_token_is (parser->lexer,
CPP_OPEN_PAREN))
error ("parenthesized initialization is not allowed in "
"OpenMP %<for%> loop");
else
/* Trigger an error. */
cp_parser_require (parser, CPP_EQ, RT_EQ);
init = error_mark_node;
cp_parser_skip_to_end_of_statement (parser);
}
else if (CLASS_TYPE_P (TREE_TYPE (decl))
|| type_dependent_expression_p (decl)
|| auto_node)
{
/* Class-type (iterator) or type-dependent variable: use
the full initializer machinery and finish the decl now,
keeping the init statements in the pre-body.  */
bool is_direct_init, is_non_constant_init;
init = cp_parser_initializer (parser,
&is_direct_init,
&is_non_constant_init);
if (auto_node)
{
TREE_TYPE (decl)
= do_auto_deduction (TREE_TYPE (decl), init,
auto_node);
/* auto deduced to a scalar type: fall back to the
plain-scalar handling below.  */
if (!CLASS_TYPE_P (TREE_TYPE (decl))
&& !type_dependent_expression_p (decl))
goto non_class;
}
cp_finish_decl (decl, init, !is_non_constant_init,
asm_specification,
LOOKUP_ONLYCONVERTING);
if (CLASS_TYPE_P (TREE_TYPE (decl)))
{
VEC_safe_push (tree, gc, for_block, this_pre_body);
init = NULL_TREE;
}
else
init = pop_stmt_list (this_pre_body);
this_pre_body = NULL_TREE;
}
else
{
/* Consume '='. */
cp_lexer_consume_token (parser->lexer);
init = cp_parser_assignment_expression (parser, false, NULL);
non_class:
if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE)
init = error_mark_node;
else
cp_finish_decl (decl, NULL_TREE,
/*init_const_expr_p=*/false,
asm_specification,
LOOKUP_ONLYCONVERTING);
}
if (pushed_scope)
pop_scope (pushed_scope);
}
}
else
{
cp_id_kind idk;
/* If parsing a type specifier sequence failed, then
this MUST be a simple expression. */
cp_parser_parse_tentatively (parser);
decl = cp_parser_primary_expression (parser, false, false,
false, &idk);
if (!cp_parser_error_occurred (parser)
&& decl
&& DECL_P (decl)
&& CLASS_TYPE_P (TREE_TYPE (decl)))
{
/* "classvar = expr": emit the assignment now and remember
to privatize the iteration variable below.  */
tree rhs;
cp_parser_parse_definitely (parser);
cp_parser_require (parser, CPP_EQ, RT_EQ);
rhs = cp_parser_assignment_expression (parser, false, NULL);
finish_expr_stmt (build_x_modify_expr (decl, NOP_EXPR,
rhs,
tf_warning_or_error));
add_private_clause = true;
}
else
{
decl = NULL;
cp_parser_abort_tentative_parse (parser);
init = cp_parser_expression (parser, false, NULL);
if (init)
{
/* For "var = lb" expressions, recover the iteration
variable from the assignment's LHS.  */
if (TREE_CODE (init) == MODIFY_EXPR
|| TREE_CODE (init) == MODOP_EXPR)
real_decl = TREE_OPERAND (init, 0);
}
}
}
}
cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
/* Append this loop's pre-body statements after any accumulated from
outer collapsed loops, preserving execution order.  */
if (this_pre_body)
{
this_pre_body = pop_stmt_list (this_pre_body);
if (pre_body)
{
tree t = pre_body;
pre_body = push_stmt_list ();
add_stmt (t);
add_stmt (this_pre_body);
pre_body = pop_stmt_list (pre_body);
}
else
pre_body = this_pre_body;
}
if (decl)
real_decl = decl;
/* For combined "parallel for", fix up clauses naming the iteration
variable: firstprivate is an error, lastprivate is moved to the
for and turned into shared on the parallel.  */
if (par_clauses != NULL && real_decl != NULL_TREE)
{
tree *c;
for (c = par_clauses; *c ; )
if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE
&& OMP_CLAUSE_DECL (*c) == real_decl)
{
error_at (loc, "iteration variable %qD"
" should not be firstprivate", real_decl);
*c = OMP_CLAUSE_CHAIN (*c);
}
else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_DECL (*c) == real_decl)
{
/* Add lastprivate (decl) clause to OMP_FOR_CLAUSES,
change it to shared (decl) in OMP_PARALLEL_CLAUSES. */
tree l = build_omp_clause (loc, OMP_CLAUSE_LASTPRIVATE);
OMP_CLAUSE_DECL (l) = real_decl;
OMP_CLAUSE_CHAIN (l) = clauses;
CP_OMP_CLAUSE_INFO (l) = CP_OMP_CLAUSE_INFO (*c);
clauses = l;
OMP_CLAUSE_SET_CODE (*c, OMP_CLAUSE_SHARED);
CP_OMP_CLAUSE_INFO (*c) = NULL;
add_private_clause = false;
}
else
{
if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_PRIVATE
&& OMP_CLAUSE_DECL (*c) == real_decl)
add_private_clause = false;
c = &OMP_CLAUSE_CHAIN (*c);
}
}
/* A class-type iteration variable initialized by assignment must be
privatized unless the user already wrote a suitable clause.  */
if (add_private_clause)
{
tree c;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
&& OMP_CLAUSE_DECL (c) == decl)
break;
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
&& OMP_CLAUSE_DECL (c) == decl)
error_at (loc, "iteration variable %qD "
"should not be firstprivate",
decl);
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_DECL (c) == decl)
error_at (loc, "iteration variable %qD should not be reduction",
decl);
}
if (c == NULL)
{
c = build_omp_clause (loc, OMP_CLAUSE_PRIVATE);
OMP_CLAUSE_DECL (c) = decl;
c = finish_omp_clauses (c);
if (c)
{
OMP_CLAUSE_CHAIN (c) = clauses;
clauses = c;
}
}
}
/* Parse the condition and increment expressions.  */
cond = NULL;
if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
cond = cp_parser_omp_for_cond (parser, decl);
cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
incr = NULL;
if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
{
/* If decl is an iterator, preserve the operator on decl
until finish_omp_for. */
if (real_decl
&& ((processing_template_decl
&& !POINTER_TYPE_P (TREE_TYPE (real_decl)))
|| CLASS_TYPE_P (TREE_TYPE (real_decl))))
incr = cp_parser_omp_for_incr (parser, real_decl);
else
incr = cp_parser_expression (parser, false, NULL);
}
if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
/*or_comma=*/false,
/*consume_paren=*/true);
TREE_VEC_ELT (declv, i) = decl;
TREE_VEC_ELT (initv, i) = init;
TREE_VEC_ELT (condv, i) = cond;
TREE_VEC_ELT (incrv, i) = incr;
if (i == collapse - 1)
break;
/* FIXME: OpenMP 3.0 draft is not very clear on what exactly is allowed
in between the collapsed for loops to be still considered perfectly
nested. Hopefully the final version clarifies this.
For now handle (multiple) {'s and empty statements. */
cp_parser_parse_tentatively (parser);
do
{
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
break;
else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
{
cp_lexer_consume_token (parser->lexer);
bracecount++;
}
else if (bracecount
&& cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
cp_lexer_consume_token (parser->lexer);
else
{
loc = cp_lexer_peek_token (parser->lexer)->location;
error_at (loc, "not enough collapsed for loops");
collapse_err = true;
cp_parser_abort_tentative_parse (parser);
/* declv doubles as the "parse failed" flag below.  */
declv = NULL_TREE;
break;
}
}
while (1);
if (declv)
{
cp_parser_parse_definitely (parser);
nbraces += bracecount;
}
}
/* Note that we saved the original contents of this flag when we entered
the structured block, and so we don't need to re-save it here. */
parser->in_statement = IN_OMP_FOR;
/* Note that the grammar doesn't call for a structured block here,
though the loop as a whole is a structured block. */
body = push_stmt_list ();
cp_parser_statement (parser, NULL_TREE, false, NULL);
body = pop_stmt_list (body);
if (declv == NULL_TREE)
ret = NULL_TREE;
else
ret = finish_omp_for (loc_first, declv, initv, condv, incrv, body,
pre_body, clauses);
/* Consume the closing braces (and stray semicolons) matching the opening
braces eaten between the collapsed loops.  */
while (nbraces)
{
if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
{
cp_lexer_consume_token (parser->lexer);
nbraces--;
}
else if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
cp_lexer_consume_token (parser->lexer);
else
{
if (!collapse_err)
{
error_at (cp_lexer_peek_token (parser->lexer)->location,
"collapsed loops not perfectly nested");
}
collapse_err = true;
cp_parser_statement_seq_opt (parser, NULL);
if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
break;
}
}
/* Emit the deferred pre-bodies of class-type iterator declarations.  */
while (!VEC_empty (tree, for_block))
add_stmt (pop_stmt_list (VEC_pop (tree, for_block)));
release_tree_vector (for_block);
return ret;
}
/* OpenMP 2.5:
   #pragma omp for for-clause[optseq] new-line
   for-loop */

#define OMP_FOR_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (1u << PRAGMA_OMP_CLAUSE_ORDERED) \
| (1u << PRAGMA_OMP_CLAUSE_SCHEDULE) \
| (1u << PRAGMA_OMP_CLAUSE_NOWAIT) \
| (1u << PRAGMA_OMP_CLAUSE_COLLAPSE))

/* Parse a stand-alone "#pragma omp for": collect the clauses, then
   parse the loop nest inside its own OMP structured block.  Returns
   the statement produced by cp_parser_omp_for_loop.  */
static tree
cp_parser_omp_for (cp_parser *parser, cp_token *pragma_tok)
{
  tree clause_list, block, result;
  unsigned int saved_state;

  clause_list = cp_parser_omp_all_clauses (parser, OMP_FOR_CLAUSE_MASK,
					   "#pragma omp for", pragma_tok);

  block = begin_omp_structured_block ();
  saved_state = cp_parser_begin_omp_structured_block (parser);

  /* NULL par_clauses: this is not a combined "parallel for".  */
  result = cp_parser_omp_for_loop (parser, clause_list, NULL);

  cp_parser_end_omp_structured_block (parser, saved_state);
  add_stmt (finish_omp_structured_block (block));

  return result;
}
/* OpenMP 2.5:
# pragma omp master new-line
structured-block */
/* Parse "#pragma omp master" and wrap the following structured block
   in an OMP_MASTER statement.  */
static tree
cp_parser_omp_master (cp_parser *parser, cp_token *pragma_tok)
{
cp_parser_require_pragma_eol (parser, pragma_tok);
/* NOTE(review): input_location and the side-effecting block parse are
   arguments of the same call, so the order in which the location is
   read relative to the parse is unspecified -- confirm intended.  */
return c_finish_omp_master (input_location,
cp_parser_omp_structured_block (parser));
}
/* OpenMP 2.5:
   # pragma omp ordered new-line
   structured-block */

/* Parse "#pragma omp ordered" and wrap the following structured block
   in an OMP_ORDERED statement located at the pragma.  */
static tree
cp_parser_omp_ordered (cp_parser *parser, cp_token *pragma_tok)
{
  tree block_stmt;
  /* Capture the location before any tokens are consumed.  */
  location_t pragma_loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_parser_require_pragma_eol (parser, pragma_tok);
  block_stmt = cp_parser_omp_structured_block (parser);
  return c_finish_omp_ordered (pragma_loc, block_stmt);
}
/* OpenMP 2.5:
section-scope:
{ section-sequence }
section-sequence:
section-directive[opt] structured-block
section-sequence section-directive structured-block */
/* Parse the braced body of an OMP sections construct, wrapping each
   section in an OMP_SECTION and the whole body in an OMP_SECTIONS
   statement (returned, already added to the current statement list).
   Returns NULL_TREE if the opening brace is missing.  */
static tree
cp_parser_omp_sections_scope (cp_parser *parser)
{
tree stmt, substmt;
bool error_suppress = false;
cp_token *tok;
if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
return NULL_TREE;
stmt = push_stmt_list ();
/* The first section may omit its "#pragma omp section": gather
   statements until the next section pragma, '}' or EOF.  */
if (cp_lexer_peek_token (parser->lexer)->pragma_kind != PRAGMA_OMP_SECTION)
{
unsigned save;
substmt = begin_omp_structured_block ();
save = cp_parser_begin_omp_structured_block (parser);
while (1)
{
cp_parser_statement (parser, NULL_TREE, false, NULL);
tok = cp_lexer_peek_token (parser->lexer);
if (tok->pragma_kind == PRAGMA_OMP_SECTION)
break;
if (tok->type == CPP_CLOSE_BRACE)
break;
if (tok->type == CPP_EOF)
break;
}
cp_parser_end_omp_structured_block (parser, save);
substmt = finish_omp_structured_block (substmt);
substmt = build1 (OMP_SECTION, void_type_node, substmt);
add_stmt (substmt);
}
/* Remaining sections: each introduced by "#pragma omp section".
   error_suppress avoids cascading diagnostics for a missing pragma.  */
while (1)
{
tok = cp_lexer_peek_token (parser->lexer);
if (tok->type == CPP_CLOSE_BRACE)
break;
if (tok->type == CPP_EOF)
break;
if (tok->pragma_kind == PRAGMA_OMP_SECTION)
{
cp_lexer_consume_token (parser->lexer);
cp_parser_require_pragma_eol (parser, tok);
error_suppress = false;
}
else if (!error_suppress)
{
cp_parser_error (parser, "expected %<#pragma omp section%> or %<}%>");
error_suppress = true;
}
substmt = cp_parser_omp_structured_block (parser);
substmt = build1 (OMP_SECTION, void_type_node, substmt);
add_stmt (substmt);
}
cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
substmt = pop_stmt_list (stmt);
stmt = make_node (OMP_SECTIONS);
TREE_TYPE (stmt) = void_type_node;
OMP_SECTIONS_BODY (stmt) = substmt;
add_stmt (stmt);
return stmt;
}
/* OpenMP 2.5:
   # pragma omp sections sections-clause[optseq] newline
   sections-scope */

#define OMP_SECTIONS_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse "#pragma omp sections": collect the clauses, parse the braced
   section-scope, and attach the clauses to the resulting OMP_SECTIONS
   statement (if parsing the scope succeeded).  */
static tree
cp_parser_omp_sections (cp_parser *parser, cp_token *pragma_tok)
{
  tree clause_list, sections_stmt;

  clause_list = cp_parser_omp_all_clauses (parser, OMP_SECTIONS_CLAUSE_MASK,
					   "#pragma omp sections", pragma_tok);

  sections_stmt = cp_parser_omp_sections_scope (parser);
  if (sections_stmt)
    OMP_SECTIONS_CLAUSES (sections_stmt) = clause_list;

  return sections_stmt;
}
/* OpenMP 2.5:
# pragma parallel parallel-clause new-line
# pragma parallel for parallel-for-clause new-line
# pragma parallel sections parallel-sections-clause new-line */
#define OMP_PARALLEL_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_IF) \
| (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (1u << PRAGMA_OMP_CLAUSE_SHARED) \
| (1u << PRAGMA_OMP_CLAUSE_COPYIN) \
| (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS))
/* Parse "#pragma omp parallel", including the combined forms
   "parallel for" and "parallel sections".  For combined forms the
   clause mask is widened with the worksharing construct's clauses
   (minus nowait, which a combined construct may not have), and the
   parsed clauses are split between the parallel and the inner
   worksharing region.  */
static tree
cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok)
{
enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL;
const char *p_name = "#pragma omp parallel";
tree stmt, clauses, par_clause, ws_clause, block;
unsigned int mask = OMP_PARALLEL_CLAUSE_MASK;
unsigned int save;
location_t loc = cp_lexer_peek_token (parser->lexer)->location;
/* "for" is a keyword; "sections" arrives as a plain identifier.  */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
{
cp_lexer_consume_token (parser->lexer);
p_kind = PRAGMA_OMP_PARALLEL_FOR;
p_name = "#pragma omp parallel for";
mask |= OMP_FOR_CLAUSE_MASK;
mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
}
else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
{
tree id = cp_lexer_peek_token (parser->lexer)->u.value;
const char *p = IDENTIFIER_POINTER (id);
if (strcmp (p, "sections") == 0)
{
cp_lexer_consume_token (parser->lexer);
p_kind = PRAGMA_OMP_PARALLEL_SECTIONS;
p_name = "#pragma omp parallel sections";
mask |= OMP_SECTIONS_CLAUSE_MASK;
mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
}
}
clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok);
block = begin_omp_parallel ();
save = cp_parser_begin_omp_structured_block (parser);
switch (p_kind)
{
case PRAGMA_OMP_PARALLEL:
/* Plain parallel: all clauses stay on the parallel.  */
cp_parser_statement (parser, NULL_TREE, false, NULL);
par_clause = clauses;
break;
case PRAGMA_OMP_PARALLEL_FOR:
/* Split clauses between the parallel and the inner for; passing
   &par_clause lets the loop parser adjust iteration-variable
   clauses on the parallel.  */
c_split_parallel_clauses (loc, clauses, &par_clause, &ws_clause);
cp_parser_omp_for_loop (parser, ws_clause, &par_clause);
break;
case PRAGMA_OMP_PARALLEL_SECTIONS:
c_split_parallel_clauses (loc, clauses, &par_clause, &ws_clause);
stmt = cp_parser_omp_sections_scope (parser);
if (stmt)
OMP_SECTIONS_CLAUSES (stmt) = ws_clause;
break;
default:
gcc_unreachable ();
}
cp_parser_end_omp_structured_block (parser, save);
stmt = finish_omp_parallel (par_clause, block);
if (p_kind != PRAGMA_OMP_PARALLEL)
OMP_PARALLEL_COMBINED (stmt) = 1;
return stmt;
}
/* OpenMP 2.5:
   # pragma omp single single-clause[optseq] new-line
   structured-block */

#define OMP_SINGLE_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse "#pragma omp single": build an OMP_SINGLE node, attach its
   clauses and structured-block body, and add it to the current
   statement list.  */
static tree
cp_parser_omp_single (cp_parser *parser, cp_token *pragma_tok)
{
  tree single_stmt = make_node (OMP_SINGLE);

  TREE_TYPE (single_stmt) = void_type_node;
  /* Clauses must be parsed before the body so the pragma line is
     consumed first.  */
  OMP_SINGLE_CLAUSES (single_stmt)
    = cp_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
				 "#pragma omp single", pragma_tok);
  OMP_SINGLE_BODY (single_stmt) = cp_parser_omp_structured_block (parser);

  return add_stmt (single_stmt);
}
/* OpenMP 3.0:
   # pragma omp task task-clause[optseq] new-line
   structured-block */

#define OMP_TASK_CLAUSE_MASK \
( (1u << PRAGMA_OMP_CLAUSE_IF) \
| (1u << PRAGMA_OMP_CLAUSE_UNTIED) \
| (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (1u << PRAGMA_OMP_CLAUSE_SHARED) \
| (1u << PRAGMA_OMP_CLAUSE_FINAL) \
| (1u << PRAGMA_OMP_CLAUSE_MERGEABLE))

/* Parse "#pragma omp task": collect the clauses, parse the task body
   inside an OMP structured block, and build the OMP_TASK statement.  */
static tree
cp_parser_omp_task (cp_parser *parser, cp_token *pragma_tok)
{
  tree clause_list, task_block;
  unsigned int saved_state;

  clause_list = cp_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK,
					   "#pragma omp task", pragma_tok);

  task_block = begin_omp_task ();
  saved_state = cp_parser_begin_omp_structured_block (parser);
  cp_parser_statement (parser, NULL_TREE, false, NULL);
  cp_parser_end_omp_structured_block (parser, saved_state);

  return finish_omp_task (clause_list, task_block);
}
/* OpenMP 3.0:
# pragma omp taskwait new-line */
/* Parse the stand-alone "#pragma omp taskwait" directive: require the
   end of the pragma line, then emit the taskwait statement.  */
static void
cp_parser_omp_taskwait (cp_parser *parser, cp_token *pragma_tok)
{
cp_parser_require_pragma_eol (parser, pragma_tok);
finish_omp_taskwait ();
}
/* OpenMP 3.1:
# pragma omp taskyield new-line */
/* Parse the stand-alone "#pragma omp taskyield" directive: require the
   end of the pragma line, then emit the taskyield statement.  */
static void
cp_parser_omp_taskyield (cp_parser *parser, cp_token *pragma_tok)
{
cp_parser_require_pragma_eol (parser, pragma_tok);
finish_omp_taskyield ();
}
/* OpenMP 2.5:
   # pragma omp threadprivate (variable-list) */

/* Parse "#pragma omp threadprivate (variable-list)" and mark the
   listed variables threadprivate.  */
static void
cp_parser_omp_threadprivate (cp_parser *parser, cp_token *pragma_tok)
{
  /* OMP_CLAUSE_ERROR: the list is not attached to any clause.  */
  tree var_list = cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL);

  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_threadprivate (var_list);
}
/* Main entry point to OpenMP statement pragmas.  Dispatches on the
   pragma kind of PRAGMA_TOK to the individual construct parsers and
   stamps the pragma's location on the resulting statement.  */
static void
cp_parser_omp_construct (cp_parser *parser, cp_token *pragma_tok)
{
  tree result;

  switch (pragma_tok->pragma_kind)
    {
    case PRAGMA_OMP_ATOMIC:
      /* Atomic emits its statement itself; nothing to locate here.  */
      cp_parser_omp_atomic (parser, pragma_tok);
      return;
    case PRAGMA_OMP_CRITICAL:
      result = cp_parser_omp_critical (parser, pragma_tok);
      break;
    case PRAGMA_OMP_FOR:
      result = cp_parser_omp_for (parser, pragma_tok);
      break;
    case PRAGMA_OMP_MASTER:
      result = cp_parser_omp_master (parser, pragma_tok);
      break;
    case PRAGMA_OMP_ORDERED:
      result = cp_parser_omp_ordered (parser, pragma_tok);
      break;
    case PRAGMA_OMP_PARALLEL:
      result = cp_parser_omp_parallel (parser, pragma_tok);
      break;
    case PRAGMA_OMP_SECTIONS:
      result = cp_parser_omp_sections (parser, pragma_tok);
      break;
    case PRAGMA_OMP_SINGLE:
      result = cp_parser_omp_single (parser, pragma_tok);
      break;
    case PRAGMA_OMP_TASK:
      result = cp_parser_omp_task (parser, pragma_tok);
      break;
    default:
      gcc_unreachable ();
    }

  /* Sub-parsers may return NULL_TREE on error.  */
  if (result)
    SET_EXPR_LOCATION (result, pragma_tok->location);
}
/* Transactional Memory parsing routines. */
/* Parse a transaction attribute.
txn-attribute:
attribute
[ [ identifier ] ]
??? Simplify this when C++0x bracket attributes are
implemented properly. */
/* Returns the attribute as a TREE_LIST (identifier as purpose), or
   NULL/NULL_TREE if no attribute is present or it is malformed.  */
static tree
cp_parser_txn_attribute_opt (cp_parser *parser)
{
cp_token *token;
tree attr_name, attr = NULL;
/* GNU __attribute__ form.  */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
return cp_parser_attributes_opt (parser);
if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE))
return NULL_TREE;
/* First '[' consumed unconditionally; the second is required.  On
   failure, jump past the inner-']' check and try to close the outer
   bracket only.  */
cp_lexer_consume_token (parser->lexer);
if (!cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE))
goto error1;
token = cp_lexer_peek_token (parser->lexer);
if (token->type == CPP_NAME || token->type == CPP_KEYWORD)
{
token = cp_lexer_consume_token (parser->lexer);
attr_name = (token->type == CPP_KEYWORD
/* For keywords, use the canonical spelling,
not the parsed identifier. */
? ridpointers[(int) token->keyword]
: token->u.value);
attr = build_tree_list (attr_name, NULL_TREE);
}
else
cp_parser_error (parser, "expected identifier");
cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
error1:
cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
return attr;
}
/* Parse a __transaction_atomic or __transaction_relaxed statement.
transaction-statement:
__transaction_atomic txn-attribute[opt] txn-noexcept-spec[opt]
compound-statement
__transaction_relaxed txn-noexcept-spec[opt] compound-statement
*/
/* KEYWORD selects which of the two statements is parsed.  Returns the
   transaction statement tree.  */
static tree
cp_parser_transaction (cp_parser *parser, enum rid keyword)
{
/* parser->in_transaction is a bit-set: bit 0 = inside a transaction,
   plus TM_STMT_ATTR_* flags; saved and restored around the body.  */
unsigned char old_in = parser->in_transaction;
unsigned char this_in = 1, new_in;
cp_token *token;
tree stmt, attrs, noex;
gcc_assert (keyword == RID_TRANSACTION_ATOMIC
|| keyword == RID_TRANSACTION_RELAXED);
token = cp_parser_require_keyword (parser, keyword,
(keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
: RT_TRANSACTION_RELAXED));
gcc_assert (token != NULL);
if (keyword == RID_TRANSACTION_RELAXED)
this_in |= TM_STMT_ATTR_RELAXED;
else
{
/* Only __transaction_atomic takes a txn-attribute (e.g. outer).  */
attrs = cp_parser_txn_attribute_opt (parser);
if (attrs)
this_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
}
/* Parse a noexcept specification. */
noex = cp_parser_noexcept_specification_opt (parser, true, NULL, true);
/* Keep track if we're in the lexical scope of an outer transaction. */
new_in = this_in | (old_in & TM_STMT_ATTR_OUTER);
stmt = begin_transaction_stmt (token->location, NULL, this_in);
parser->in_transaction = new_in;
cp_parser_compound_statement (parser, NULL, false, false);
parser->in_transaction = old_in;
finish_transaction_stmt (stmt, NULL, this_in, noex);
return stmt;
}
/* Parse a __transaction_atomic or __transaction_relaxed expression.
transaction-expression:
__transaction_atomic txn-noexcept-spec[opt] ( expression )
__transaction_relaxed txn-noexcept-spec[opt] ( expression )
*/
/* Returns the TRANSACTION_EXPR, or error_mark_node when TM is disabled
   or the expression is used where an integral constant is needed.  */
static tree
cp_parser_transaction_expression (cp_parser *parser, enum rid keyword)
{
unsigned char old_in = parser->in_transaction;
unsigned char this_in = 1;
cp_token *token;
tree expr, noex;
bool noex_expr;
gcc_assert (keyword == RID_TRANSACTION_ATOMIC
|| keyword == RID_TRANSACTION_RELAXED);
/* Diagnose now but keep parsing so follow-on errors make sense; the
   final return still yields error_mark_node when !flag_tm.  */
if (!flag_tm)
error (keyword == RID_TRANSACTION_RELAXED
? G_("%<__transaction_relaxed%> without transactional memory "
"support enabled")
: G_("%<__transaction_atomic%> without transactional memory "
"support enabled"));
token = cp_parser_require_keyword (parser, keyword,
(keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
: RT_TRANSACTION_RELAXED));
gcc_assert (token != NULL);
if (keyword == RID_TRANSACTION_RELAXED)
this_in |= TM_STMT_ATTR_RELAXED;
/* Set this early. This might mean that we allow transaction_cancel in
an expression that we find out later actually has to be a constexpr.
However, we expect that cxx_constant_value will be able to deal with
this; also, if the noexcept has no constexpr, then what we parse next
really is a transaction's body. */
parser->in_transaction = this_in;
/* Parse a noexcept specification. */
noex = cp_parser_noexcept_specification_opt (parser, false, &noex_expr,
true);
if (!noex || !noex_expr
|| cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
{
cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
finish_parenthesized_expr (expr);
cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
}
else
{
/* The only expression that is available got parsed for the noexcept
already. noexcept is true then. */
expr = noex;
noex = boolean_true_node;
}
expr = build_transaction_expr (token->location, expr, this_in, noex);
parser->in_transaction = old_in;
/* Transaction expressions are not integral constant expressions.  */
if (cp_parser_non_integral_constant_expression (parser, NIC_TRANSACTION))
return error_mark_node;
return (flag_tm ? expr : error_mark_node);
}
/* Parse a function-transaction-block.
function-transaction-block:
__transaction_atomic txn-attribute[opt] ctor-initializer[opt]
function-body
__transaction_atomic txn-attribute[opt] function-try-block
__transaction_relaxed ctor-initializer[opt] function-body
__transaction_relaxed function-try-block
*/
/* Returns true if a ctor-initializer was present (same contract as
   cp_parser_ctor_initializer_opt_and_function_body).  */
static bool
cp_parser_function_transaction (cp_parser *parser, enum rid keyword)
{
unsigned char old_in = parser->in_transaction;
unsigned char new_in = 1;
tree compound_stmt, stmt, attrs;
bool ctor_initializer_p;
cp_token *token;
gcc_assert (keyword == RID_TRANSACTION_ATOMIC
|| keyword == RID_TRANSACTION_RELAXED);
token = cp_parser_require_keyword (parser, keyword,
(keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
: RT_TRANSACTION_RELAXED));
gcc_assert (token != NULL);
if (keyword == RID_TRANSACTION_RELAXED)
new_in |= TM_STMT_ATTR_RELAXED;
else
{
/* Only the atomic form accepts a txn-attribute.  */
attrs = cp_parser_txn_attribute_opt (parser);
if (attrs)
new_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
}
/* The whole function body becomes the transaction body.  */
stmt = begin_transaction_stmt (token->location, &compound_stmt, new_in);
parser->in_transaction = new_in;
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
ctor_initializer_p = cp_parser_function_try_block (parser);
else
ctor_initializer_p
= cp_parser_ctor_initializer_opt_and_function_body (parser);
parser->in_transaction = old_in;
finish_transaction_stmt (stmt, compound_stmt, new_in, NULL_TREE);
return ctor_initializer_p;
}
/* Parse a __transaction_cancel statement.
cancel-statement:
__transaction_cancel txn-attribute[opt] ;
__transaction_cancel txn-attribute[opt] throw-expression ;
??? Cancel and throw is not yet implemented. */
/* Returns the TM abort call statement, or error_mark_node when the
   cancel is used in an invalid context.  */
static tree
cp_parser_transaction_cancel (cp_parser *parser)
{
cp_token *token;
bool is_outer = false;
tree stmt, attrs;
token = cp_parser_require_keyword (parser, RID_TRANSACTION_CANCEL,
RT_TRANSACTION_CANCEL);
gcc_assert (token != NULL);
attrs = cp_parser_txn_attribute_opt (parser);
if (attrs)
is_outer = (parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER) != 0);
/* ??? Parse cancel-and-throw here. */
cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
/* Context checks, diagnosed after the statement is fully consumed so
   recovery continues at the next statement.  */
if (!flag_tm)
{
error_at (token->location, "%<__transaction_cancel%> without "
"transactional memory support enabled");
return error_mark_node;
}
else if (parser->in_transaction & TM_STMT_ATTR_RELAXED)
{
error_at (token->location, "%<__transaction_cancel%> within a "
"%<__transaction_relaxed%>");
return error_mark_node;
}
else if (is_outer)
{
/* An outer cancel must be lexically inside an outer transaction or
   a function marked transaction_may_cancel_outer.  */
if ((parser->in_transaction & TM_STMT_ATTR_OUTER) == 0
&& !is_tm_may_cancel_outer (current_function_decl))
{
error_at (token->location, "outer %<__transaction_cancel%> not "
"within outer %<__transaction_atomic%>");
error_at (token->location,
" or a %<transaction_may_cancel_outer%> function");
return error_mark_node;
}
}
else if (parser->in_transaction == 0)
{
error_at (token->location, "%<__transaction_cancel%> not within "
"%<__transaction_atomic%>");
return error_mark_node;
}
stmt = build_tm_abort_call (token->location, is_outer);
add_stmt (stmt);
finish_stmt ();
return stmt;
}
/* The C++ parser singleton.  GTY-marked so the garbage collector keeps
   the trees and tokens it holds alive across collection points. */
static GTY (()) cp_parser *the_parser;
/* Special handling for the first token or line in the file. The first
thing in the file might be #pragma GCC pch_preprocess, which loads a
PCH file, which is a GC collection point. So we need to handle this
first pragma without benefit of an existing lexer structure.
Always returns one token to the caller in *FIRST_TOKEN. This is
either the true first token of the file, or the first token after
the initial pragma. */
static void
cp_parser_initial_pragma (cp_token *first_token)
{
tree name = NULL;
/* Peek at the very first token; passing a NULL lexer reads straight
   from the preprocessor.  */
cp_lexer_get_preprocessor_token (NULL, first_token);
if (first_token->pragma_kind != PRAGMA_GCC_PCH_PREPROCESS)
return;
/* The pragma's sole argument is the PCH file name string.  */
cp_lexer_get_preprocessor_token (NULL, first_token);
if (first_token->type == CPP_STRING)
{
name = first_token->u.value;
cp_lexer_get_preprocessor_token (NULL, first_token);
if (first_token->type != CPP_PRAGMA_EOL)
error_at (first_token->location,
"junk at end of %<#pragma GCC pch_preprocess%>");
}
else
error_at (first_token->location, "expected string literal");
/* Skip to the end of the pragma. */
while (first_token->type != CPP_PRAGMA_EOL && first_token->type != CPP_EOF)
cp_lexer_get_preprocessor_token (NULL, first_token);
/* Now actually load the PCH file. */
if (name)
c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name));
/* Read one more token to return to our caller. We have to do this
after reading the PCH file in, since its pointers have to be
live. */
cp_lexer_get_preprocessor_token (NULL, first_token);
}
/* Normal parsing of a pragma token. Here we can (and must) use the
regular lexer. */
/* CONTEXT describes where the pragma appeared (external scope, a
   statement position, or directly inside a compound statement).
   Returns true if the pragma produced a statement that the caller
   should account for; false otherwise.  */
static bool
cp_parser_pragma (cp_parser *parser, enum pragma_context context)
{
cp_token *pragma_tok;
unsigned int id;
pragma_tok = cp_lexer_consume_token (parser->lexer);
gcc_assert (pragma_tok->type == CPP_PRAGMA);
/* Tell the lexer we are inside a pragma so it yields CPP_PRAGMA_EOL.  */
parser->lexer->in_pragma = true;
id = pragma_tok->pragma_kind;
switch (id)
{
case PRAGMA_GCC_PCH_PREPROCESS:
/* Only valid as the very first line; see cp_parser_initial_pragma.  */
error_at (pragma_tok->location,
"%<#pragma GCC pch_preprocess%> must be first");
break;
/* Stand-alone OpenMP directives: allowed only directly inside a
   compound statement, not as the sole substatement of e.g. an if.  */
case PRAGMA_OMP_BARRIER:
switch (context)
{
case pragma_compound:
cp_parser_omp_barrier (parser, pragma_tok);
return false;
case pragma_stmt:
error_at (pragma_tok->location, "%<#pragma omp barrier%> may only be "
"used in compound statements");
break;
default:
goto bad_stmt;
}
break;
case PRAGMA_OMP_FLUSH:
switch (context)
{
case pragma_compound:
cp_parser_omp_flush (parser, pragma_tok);
return false;
case pragma_stmt:
error_at (pragma_tok->location, "%<#pragma omp flush%> may only be "
"used in compound statements");
break;
default:
goto bad_stmt;
}
break;
case PRAGMA_OMP_TASKWAIT:
switch (context)
{
case pragma_compound:
cp_parser_omp_taskwait (parser, pragma_tok);
return false;
case pragma_stmt:
error_at (pragma_tok->location,
"%<#pragma omp taskwait%> may only be "
"used in compound statements");
break;
default:
goto bad_stmt;
}
break;
case PRAGMA_OMP_TASKYIELD:
switch (context)
{
case pragma_compound:
cp_parser_omp_taskyield (parser, pragma_tok);
return false;
case pragma_stmt:
error_at (pragma_tok->location,
"%<#pragma omp taskyield%> may only be "
"used in compound statements");
break;
default:
goto bad_stmt;
}
break;
case PRAGMA_OMP_THREADPRIVATE:
cp_parser_omp_threadprivate (parser, pragma_tok);
return false;
/* OpenMP constructs with a body: valid anywhere but at file scope.  */
case PRAGMA_OMP_ATOMIC:
case PRAGMA_OMP_CRITICAL:
case PRAGMA_OMP_FOR:
case PRAGMA_OMP_MASTER:
case PRAGMA_OMP_ORDERED:
case PRAGMA_OMP_PARALLEL:
case PRAGMA_OMP_SECTIONS:
case PRAGMA_OMP_SINGLE:
case PRAGMA_OMP_TASK:
if (context == pragma_external)
goto bad_stmt;
cp_parser_omp_construct (parser, pragma_tok);
return true;
case PRAGMA_OMP_SECTION:
error_at (pragma_tok->location,
"%<#pragma omp section%> may only be used in "
"%<#pragma omp sections%> construct");
break;
default:
/* Anything else was registered externally (e.g. target pragmas).  */
gcc_assert (id >= PRAGMA_FIRST_EXTERNAL);
c_invoke_pragma_handler (id);
break;
bad_stmt:
cp_parser_error (parser, "expected declaration specifiers");
break;
}
/* Error paths fall through here: discard the rest of the pragma line.  */
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
return false;
}
/* The interface the pragma parsers have to the lexer.  Returns the
   kind of the next token, storing its value in *VALUE.  Pragma end
   and EOF both map to CPP_EOF (without consuming); strings are fully
   parsed (and concatenated); keywords are reported as CPP_NAME.  */
enum cpp_ttype
pragma_lex (tree *value)
{
  cp_token *token = cp_lexer_peek_token (the_parser->lexer);
  enum cpp_ttype kind = token->type;

  *value = token->u.value;

  /* Leave the terminator in place for the caller.  */
  if (kind == CPP_PRAGMA_EOL || kind == CPP_EOF)
    return CPP_EOF;

  if (kind == CPP_STRING)
    {
      /* cp_parser_string_literal consumes the token(s) itself.  */
      *value = cp_parser_string_literal (the_parser, false, false);
      return kind;
    }

  cp_lexer_consume_token (the_parser->lexer);
  return kind == CPP_KEYWORD ? CPP_NAME : kind;
}
/* External interface. */

/* Parse one entire translation unit.  May only be invoked once per
   process; a second call reports that inter-module optimization is
   unsupported and returns without parsing.  */
void
c_parse_file (void)
{
  static bool entered = false;

  if (entered)
    {
      sorry ("inter-module optimizations not implemented for C++");
      return;
    }
  entered = true;

  the_parser = cp_parser_new ();
  /* With access control disabled, skip access checking entirely rather
     than merely deferring it.  */
  push_deferring_access_checks (flag_access_control
				? dk_no_deferred : dk_no_check);
  cp_parser_translation_unit (the_parser);
  the_parser = NULL;
}
#include "gt-cp-parser.h"
|
undirected_edge.h | #pragma once
#include <gms/common/types.h>
#include <vector>
#include <cassert>
namespace GMS::LinkPrediction {
/**
* Represents an undirected edge as a std::pair, with the invariant first <= second.
*/
class UndirectedEdge : public std::pair<NodeId, NodeId>
{
public:
UndirectedEdge() : std::pair<NodeId, NodeId>(0, 0) {}
UndirectedEdge(NodeId u, NodeId v) : std::pair<NodeId, NodeId>(u, v) {
assert(u <= v);
}
};
// NOTE: Currently only used for debug assertions.
//
// Counts the undirected edges of `graph`.  Each edge {u, v} with u != v is
// counted once (from its u < v orientation); self-loop adjacency entries are
// tallied separately and halved, which assumes each self-loop appears twice
// in the adjacency data — TODO confirm against the graph representation.
template <class SGraph>
int64_t count_undirected_edges(const SGraph &graph) {
    const int64_t node_count = graph.num_nodes();
    int64_t cross_edges = 0;   // entries (u, v) with u < v
    int64_t loop_entries = 0;  // entries (u, v) with u == v
    #pragma omp parallel for reduction(+: cross_edges, loop_entries)
    for (NodeId u = 0; u < node_count; ++u) {
        for (NodeId v : graph.out_neigh(u)) {
            if (u == v) {
                ++loop_entries;
            } else if (u < v) {
                ++cross_edges;
                // Symmetry check: the reverse entry must exist as well.
                assert(graph.out_neigh(v).contains(u));
            }
        }
    }
    return cross_edges + loop_entries / 2;
}
} |
schur_eliminator_impl.h | // Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// TODO(sameeragarwal): row_block_counter can perhaps be replaced by
// Chunk::start ?
#ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
#define CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
// Eigen has an internal threshold switching between different matrix
// multiplication algorithms. In particular for matrices larger than
// EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD it uses a cache friendly
// matrix matrix product algorithm that has a higher setup cost. For
// matrix sizes close to this threshold, especially when the matrices
// are thin and long, the default choice may not be optimal. This is
// the case for us, as the default choice causes a 30% performance
// regression when we moved from Eigen2 to Eigen3.
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif
#include <algorithm>
#include <map>
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/map_util.h"
#include "ceres/schur_eliminator.h"
#include "ceres/small_blas.h"
#include "ceres/stl_util.h"
#include "Eigen/Dense"
#include "glog/logging.h"
namespace ceres {
namespace internal {
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::~SchurEliminator() {
  // Init() heap-allocates one Mutex per reduced-system row block; release
  // them here.
  STLDeleteElements(&rhs_locks_);
}
// Analyze the block structure `bs`, partition the e-block rows into chunks,
// size the per-thread scratch buffers, and create the locks protecting the
// rhs of the reduced linear system.  Must be called before Eliminate().
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
Init(int num_eliminate_blocks, const CompressedRowBlockStructure* bs) {
  CHECK_GT(num_eliminate_blocks, 0)
      << "SchurComplementSolver cannot be initialized with "
      << "num_eliminate_blocks = 0.";
  num_eliminate_blocks_ = num_eliminate_blocks;
  const int num_col_blocks = int(bs->cols.size());
  const int num_row_blocks = int(bs->rows.size());
  // buffer_size_ is grown below to the largest per-chunk workspace;
  // starting at 1 keeps the allocations at the end non-zero-sized.
  buffer_size_ = 1;
  chunks_.clear();
  lhs_row_layout_.clear();
  int lhs_num_rows = 0;
  // Add a map object for each block in the reduced linear system
  // and build the row/column block structure of the reduced linear
  // system.
  lhs_row_layout_.resize(num_col_blocks - num_eliminate_blocks_);
  for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
    lhs_row_layout_[i - num_eliminate_blocks_] = lhs_num_rows;
    lhs_num_rows += bs->cols[i].size;
  }
  int r = 0;
  // Iterate over the row blocks of A, and detect the chunks. The
  // matrix should already have been ordered so that all rows
  // containing the same y block are vertically contiguous. Along
  // the way also compute the amount of space each chunk will need
  // to perform the elimination.
  while (r < num_row_blocks) {
    const int chunk_block_id = bs->rows[r].cells.front().block_id;
    if (chunk_block_id >= num_eliminate_blocks_) {
      // First row whose leading block is not an e-block; rows from here
      // on are handled by NoEBlockRowsUpdate.
      break;
    }
    chunks_.push_back(Chunk());
    Chunk& chunk = chunks_.back();
    chunk.size = 0;
    chunk.start = r;
    int buffer_size = 0;
    const int e_block_size = bs->cols[chunk_block_id].size;
    // Add to the chunk until the first block in the row is
    // different than the one in the first row for the chunk.
    while (r + chunk.size < num_row_blocks) {
      const CompressedRow& row = bs->rows[r + chunk.size];
      if (row.cells.front().block_id != chunk_block_id) {
        break;
      }
      // Iterate over the blocks in the row, ignoring the first
      // block since it is the one to be eliminated.
      for (int c = 1; c < row.cells.size(); ++c) {
        const Cell& cell = row.cells[c];
        if (InsertIfNotPresent(
                &(chunk.buffer_layout), cell.block_id, buffer_size)) {
          // First time this f-block appears in the chunk; reserve space
          // for its E'F product in the chunk buffer.
          buffer_size += e_block_size * bs->cols[cell.block_id].size;
        }
      }
      buffer_size_ = max(buffer_size, buffer_size_);
      ++chunk.size;
    }
    CHECK_GT(chunk.size, 0);
    r += chunk.size;
  }
  const Chunk& chunk = chunks_.back();
  uneliminated_row_begins_ = chunk.start + chunk.size;
  if (num_threads_ > 1) {
    // Randomize the chunk order so that threads processing nearby chunks
    // are less likely to contend for the same lhs cells and rhs locks.
    random_shuffle(chunks_.begin(), chunks_.end());
  }
  buffer_.reset(new double[buffer_size_ * num_threads_]);
  // chunk_outer_product_buffer_ only needs to store e_block_size *
  // f_block_size, which is always less than buffer_size_, so we just
  // allocate buffer_size_ per thread.
  chunk_outer_product_buffer_.reset(new double[buffer_size_ * num_threads_]);
  STLDeleteElements(&rhs_locks_);
  rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_);
  for (int i = 0; i < num_col_blocks - num_eliminate_blocks_; ++i) {
    rhs_locks_[i] = new Mutex;
  }
}
// Form the Schur complement S = F'F - F'E(E'E)^{-1}E'F (written into `lhs`)
// and the corresponding reduced right hand side (written into `rhs`),
// eliminating the first num_eliminate_blocks_ column blocks of A.
// D, if non-NULL, holds the square roots of the diagonal regularizer.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
Eliminate(const BlockSparseMatrix* A,
          const double* b,
          const double* D,
          BlockRandomAccessMatrix* lhs,
          double* rhs) {
  if (lhs->num_rows() > 0) {
    lhs->SetZero();
    VectorRef(rhs, lhs->num_rows()).setZero();
  }
  const CompressedRowBlockStructure* bs = A->block_structure();
  const int num_col_blocks = int(bs->cols.size());
  // Add the diagonal to the schur complement.
  if (D != NULL) {
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
    for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
      const int block_id = i - num_eliminate_blocks_;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block_id, block_id,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block_size = bs->cols[i].size;
        typename EigenTypes<kFBlockSize>::ConstVectorRef
            diag(D + bs->cols[i].position, block_size);
        // D stores square roots, hence the squaring before adding.
        CeresMutexLock l(&cell_info->m);
        MatrixRef m(cell_info->values, row_stride, col_stride);
        m.block(r, c, block_size, block_size).diagonal()
            += diag.array().square().matrix();
      }
    }
  }
  // Eliminate y blocks one chunk at a time.  For each chunk,
  // compute the entries of the normal equations and the gradient
  // vector block corresponding to the y block and then apply
  // Gaussian elimination to them. The matrix ete stores the normal
  // matrix corresponding to the block being eliminated and array
  // buffer_ contains the non-zero blocks in the row corresponding
  // to this y block in the normal equations. This computation is
  // done in ChunkDiagonalBlockAndGradient. UpdateRhs then applies
  // gaussian elimination to the rhs of the normal equations,
  // updating the rhs of the reduced linear system by modifying rhs
  // blocks for all the z blocks that share a row block/residual
  // term with the y block. EliminateRowOuterProduct does the
  // corresponding operation for the lhs of the reduced linear
  // system.
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
  for (int i = 0; i < chunks_.size(); ++i) {
#ifdef CERES_USE_OPENMP
    int thread_id = omp_get_thread_num();
#else
    int thread_id = 0;
#endif
    // Each thread works on its own slice of the scratch buffer.
    double* buffer = buffer_.get() + thread_id * buffer_size_;
    const Chunk& chunk = chunks_[i];
    const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
    const int e_block_size = bs->cols[e_block_id].size;
    VectorRef(buffer, buffer_size_).setZero();
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
        ete(e_block_size, e_block_size);
    if (D != NULL) {
      // Seed ete with the (squared) regularizer for this e-block.
      const typename EigenTypes<kEBlockSize>::ConstVectorRef
          diag(D + bs->cols[e_block_id].position, e_block_size);
      ete = diag.array().square().matrix().asDiagonal();
    } else {
      ete.setZero();
    }
    // g accumulates the gradient block E'b for this chunk.
    FixedArray<double, 8> g(e_block_size);
    typename EigenTypes<kEBlockSize>::VectorRef gref(g.get(), e_block_size);
    gref.setZero();
    // We are going to be computing
    //
    //   S += F'F - F'E(E'E)^{-1}E'F
    //
    // for each Chunk. The computation is broken down into a number of
    // function calls as below.
    // Compute the outer product of the e_blocks with themselves (ete
    // = E'E). Compute the product of the e_blocks with the
    // corresponding f_blocks (buffer = E'F), the gradient of the terms
    // in this chunk (g) and add the outer product of the f_blocks to
    // Schur complement (S += F'F).
    ChunkDiagonalBlockAndGradient(
        chunk, A, b, chunk.start, &ete, g.get(), buffer, lhs);
    // Normally one wouldn't compute the inverse explicitly, but
    // e_block_size will typically be a small number like 3, in
    // which case it's much faster to compute the inverse once and
    // use it to multiply other matrices/vectors instead of doing a
    // Solve call over and over again.
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete =
        ete
        .template selfadjointView<Eigen::Upper>()
        .llt()
        .solve(Matrix::Identity(e_block_size, e_block_size));
    // For the current chunk compute and update the rhs of the reduced
    // linear system.
    //
    //   rhs = F'b - F'E(E'E)^(-1) E'b
    FixedArray<double, 8> inverse_ete_g(e_block_size);
    MatrixVectorMultiply<kEBlockSize, kEBlockSize, 0>(
        inverse_ete.data(),
        e_block_size,
        e_block_size,
        g.get(),
        inverse_ete_g.get());
    UpdateRhs(chunk, A, b, chunk.start, inverse_ete_g.get(), rhs);
    // S -= F'E(E'E)^{-1}E'F
    ChunkOuterProduct(bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
  }
  // For rows with no e_blocks, the schur complement update reduces to
  //   S += F'F.
  NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
}
// Given the solution `z` of the reduced linear system, recover the values of
// the eliminated y blocks into `y` by back substitution, one chunk at a time:
// y = (E'E)^{-1} E'(b - F z).
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
BackSubstitute(const BlockSparseMatrix* A,
               const double* b,
               const double* D,
               const double* z,
               double* y) {
  const CompressedRowBlockStructure* bs = A->block_structure();
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
  for (int i = 0; i < chunks_.size(); ++i) {
    const Chunk& chunk = chunks_[i];
    const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
    const int e_block_size = bs->cols[e_block_id].size;
    double* y_ptr = y + bs->cols[e_block_id].position;
    typename EigenTypes<kEBlockSize>::VectorRef y_block(y_ptr, e_block_size);
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
        ete(e_block_size, e_block_size);
    if (D != NULL) {
      // Seed ete with the (squared) regularizer for this e-block.
      const typename EigenTypes<kEBlockSize>::ConstVectorRef
          diag(D + bs->cols[e_block_id].position, e_block_size);
      ete = diag.array().square().matrix().asDiagonal();
    } else {
      ete.setZero();
    }
    const double* values = A->values();
    for (int j = 0; j < chunk.size; ++j) {
      const CompressedRow& row = bs->rows[chunk.start + j];
      const Cell& e_cell = row.cells.front();
      DCHECK_EQ(e_block_id, e_cell.block_id);
      // sj = b_j, then subtract the contribution of the f-blocks: sj -= F_j z.
      FixedArray<double, 8> sj(row.block.size);
      typename EigenTypes<kRowBlockSize>::VectorRef(sj.get(), row.block.size) =
          typename EigenTypes<kRowBlockSize>::ConstVectorRef
          (b + bs->rows[chunk.start + j].block.position, row.block.size);
      for (int c = 1; c < row.cells.size(); ++c) {
        const int f_block_id = row.cells[c].block_id;
        const int f_block_size = bs->cols[f_block_id].size;
        const int r_block = f_block_id - num_eliminate_blocks_;
        MatrixVectorMultiply<kRowBlockSize, kFBlockSize, -1>(
            values + row.cells[c].position, row.block.size, f_block_size,
            z + lhs_row_layout_[r_block],
            sj.get());
      }
      // y_block += E_j' sj and ete += E_j' E_j.
      MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
          values + e_cell.position, row.block.size, e_block_size,
          sj.get(),
          y_ptr);
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
              values + e_cell.position, row.block.size, e_block_size,
              values + e_cell.position, row.block.size, e_block_size,
              ete.data(), 0, 0, e_block_size, e_block_size);
    }
    // Solve (E'E) y_block = accumulated rhs in place.
    ete.llt().solveInPlace(y_block);
  }
}
// Update the rhs of the reduced linear system. Compute
//
//   F'b - F'E(E'E)^(-1) E'b
//
// for the rows in `chunk`.  `inverse_ete_g` holds (E'E)^{-1} E'b for this
// chunk; the destination rhs blocks are protected by rhs_locks_ because
// multiple chunks may touch the same f-block concurrently.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
UpdateRhs(const Chunk& chunk,
          const BlockSparseMatrix* A,
          const double* b,
          int row_block_counter,
          const double* inverse_ete_g,
          double* rhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
  const int e_block_size = bs->cols[e_block_id].size;
  int b_pos = bs->rows[row_block_counter].block.position;
  const double* values = A->values();
  for (int j = 0; j < chunk.size; ++j) {
    const CompressedRow& row = bs->rows[row_block_counter + j];
    const Cell& e_cell = row.cells.front();
    // sj = b_j - E_j (E'E)^{-1} E'b.
    typename EigenTypes<kRowBlockSize>::Vector sj =
        typename EigenTypes<kRowBlockSize>::ConstVectorRef
        (b + b_pos, row.block.size);
    MatrixVectorMultiply<kRowBlockSize, kEBlockSize, -1>(
        values + e_cell.position, row.block.size, e_block_size,
        inverse_ete_g, sj.data());
    // Scatter F_j' sj into the rhs blocks of the reduced system.
    for (int c = 1; c < row.cells.size(); ++c) {
      const int block_id = row.cells[c].block_id;
      const int block_size = bs->cols[block_id].size;
      const int block = block_id - num_eliminate_blocks_;
      CeresMutexLock l(rhs_locks_[block]);
      MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
          values + row.cells[c].position,
          row.block.size, block_size,
          sj.data(), rhs + lhs_row_layout_[block]);
    }
    b_pos += row.block.size;
  }
}
// Given a Chunk - set of rows with the same e_block, e.g. in the
// following Chunk with two rows.
//
//                E                   F
//      [ y11   0   0   0 |  z11     0    0   0 z51]
//      [ y12   0   0   0 |  z12   z22    0   0   0]
//
// this function computes two matrices. The diagonal block matrix
//
//   ete = y11 * y11' + y12 * y12'
//
// and the off diagonal blocks in the Gauss Newton Hessian.
//
//   buffer = [y11'(z11 + z12), y12' * z22, y11' * z51]
//
// which are zero compressed versions of the block sparse matrices E'E
// and E'F.
//
// and the gradient of the e_block, E'b.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkDiagonalBlockAndGradient(
    const Chunk& chunk,
    const BlockSparseMatrix* A,
    const double* b,
    int row_block_counter,
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
    double* g,
    double* buffer,
    BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  int b_pos = bs->rows[row_block_counter].block.position;
  const int e_block_size = ete->rows();
  // Iterate over the rows in this chunk, for each row, compute the
  // contribution of its F blocks to the Schur complement, the
  // contribution of its E block to the matrix E'E (ete), and the
  // corresponding block in the gradient vector.
  const double* values = A->values();
  for (int j = 0; j < chunk.size; ++j) {
    const CompressedRow& row = bs->rows[row_block_counter + j];
    if (row.cells.size() > 1) {
      // The row has f-blocks; add their outer product (F'F) to lhs.
      EBlockRowOuterProduct(A, row_block_counter + j, lhs);
    }
    // Extract the e_block, ETE += E_i' E_i
    const Cell& e_cell = row.cells.front();
    MatrixTransposeMatrixMultiply
        <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
            values + e_cell.position, row.block.size, e_block_size,
            values + e_cell.position, row.block.size, e_block_size,
            ete->data(), 0, 0, e_block_size, e_block_size);
    // g += E_i' b_i
    MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
        values + e_cell.position, row.block.size, e_block_size,
        b + b_pos,
        g);
    // buffer = E'F. This computation is done by iterating over the
    // f_blocks for each row in the chunk.
    for (int c = 1; c < row.cells.size(); ++c) {
      const int f_block_id = row.cells[c].block_id;
      const int f_block_size = bs->cols[f_block_id].size;
      // buffer_layout maps f_block_id to this f-block's slot in buffer.
      double* buffer_ptr =
          buffer + FindOrDie(chunk.buffer_layout, f_block_id);
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kEBlockSize, kRowBlockSize, kFBlockSize, 1>(
              values + e_cell.position, row.block.size, e_block_size,
              values + row.cells[c].position, row.block.size, f_block_size,
              buffer_ptr, 0, 0, e_block_size, f_block_size);
    }
    b_pos += row.block.size;
  }
}
// Compute the outer product F'E(E'E)^{-1}E'F and subtract it from the
// Schur complement matrix, i.e
//
//   S -= F'E(E'E)^{-1}E'F.
//
// `buffer` holds the E'F blocks for one chunk, laid out according to
// `buffer_layout`; `inverse_ete` is (E'E)^{-1} for the chunk's e-block.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkOuterProduct(const CompressedRowBlockStructure* bs,
                  const Matrix& inverse_ete,
                  const double* buffer,
                  const BufferLayoutType& buffer_layout,
                  BlockRandomAccessMatrix* lhs) {
  // This is the most computationally expensive part of this
  // code. Profiling experiments reveal that the bottleneck is not the
  // computation of the right-hand matrix product, but memory
  // references to the left hand side.
  const int e_block_size = inverse_ete.rows();
  BufferLayoutType::const_iterator it1 = buffer_layout.begin();
#ifdef CERES_USE_OPENMP
  int thread_id = omp_get_thread_num();
#else
  int thread_id = 0;
#endif
  // Per-thread scratch area for b1' * ete^{-1}.
  double* b1_transpose_inverse_ete =
      chunk_outer_product_buffer_.get() + thread_id * buffer_size_;
  // S(i,j) -= bi' * ete^{-1} b_j
  for (; it1 != buffer_layout.end(); ++it1) {
    const int block1 = it1->first - num_eliminate_blocks_;
    const int block1_size = bs->cols[it1->first].size;
    MatrixTransposeMatrixMultiply
        <kEBlockSize, kFBlockSize, kEBlockSize, kEBlockSize, 0>(
            buffer + it1->second, e_block_size, block1_size,
            inverse_ete.data(), e_block_size, e_block_size,
            b1_transpose_inverse_ete, 0, 0, block1_size, e_block_size);
    // Only the upper-triangular (block2 >= block1) cells are visited;
    // it2 starts at it1, so buffer_layout's ordering determines which
    // half of S is filled in.
    BufferLayoutType::const_iterator it2 = it1;
    for (; it2 != buffer_layout.end(); ++it2) {
      const int block2 = it2->first - num_eliminate_blocks_;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block2_size = bs->cols[it2->first].size;
        CeresMutexLock l(&cell_info->m);
        MatrixMatrixMultiply
            <kFBlockSize, kEBlockSize, kEBlockSize, kFBlockSize, -1>(
                b1_transpose_inverse_ete, block1_size, e_block_size,
                buffer + it2->second, e_block_size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}
// For rows with no e_blocks, the schur complement update reduces to S
// += F'F. This function iterates over the rows of A with no e_block,
// and calls NoEBlockRowOuterProduct on each row.  It also accumulates
// the corresponding rhs contribution F'b.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowsUpdate(const BlockSparseMatrix* A,
                   const double* b,
                   int row_block_counter,
                   BlockRandomAccessMatrix* lhs,
                   double* rhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const double* values = A->values();
  for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
    const CompressedRow& row = bs->rows[row_block_counter];
    // rhs += F_i' b_i for every cell of the row (all cells are f-blocks
    // here, so the loop starts at c = 0).
    for (int c = 0; c < row.cells.size(); ++c) {
      const int block_id = row.cells[c].block_id;
      const int block_size = bs->cols[block_id].size;
      const int block = block_id - num_eliminate_blocks_;
      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
          values + row.cells[c].position, row.block.size, block_size,
          b + row.block.position,
          rhs + lhs_row_layout_[block]);
    }
    NoEBlockRowOuterProduct(A, row_block_counter, lhs);
  }
}
// A row r of A, which has no e_blocks gets added to the Schur
// Complement as S += r r'. This function is responsible for computing
// the contribution of a single row r to the Schur complement. It is
// very similar in structure to EBlockRowOuterProduct except for
// one difference. It does not use any of the template
// parameters. This is because the algorithm used for detecting the
// static structure of the matrix A only pays attention to rows with
// e_blocks. This is because rows without e_blocks are rare and
// typically arise from regularization terms in the original
// optimization problem, and have a very different structure than the
// rows with e_blocks. Including them in the static structure
// detection will lead to most template parameters being set to
// dynamic. Since the number of rows without e_blocks is small, the
// lack of templating is not an issue.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
                        int row_block_index,
                        BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const CompressedRow& row = bs->rows[row_block_index];
  const double* values = A->values();
  for (int i = 0; i < row.cells.size(); ++i) {
    const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
    DCHECK_GE(block1, 0);
    const int block1_size = bs->cols[row.cells[i].block_id].size;
    int r, c, row_stride, col_stride;
    CellInfo* cell_info = lhs->GetCell(block1, block1,
                                       &r, &c,
                                       &row_stride, &col_stride);
    if (cell_info != NULL) {
      CeresMutexLock l(&cell_info->m);
      // This multiply currently ignores the fact that this is a
      // symmetric outer product.
      MatrixTransposeMatrixMultiply
          <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
              values + row.cells[i].position, row.block.size, block1_size,
              values + row.cells[i].position, row.block.size, block1_size,
              cell_info->values, r, c, row_stride, col_stride);
    }
    // Off-diagonal cells: S(block1, block2) += b1' * b2 for block2 > block1.
    for (int j = i + 1; j < row.cells.size(); ++j) {
      const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
      DCHECK_GE(block2, 0);
      DCHECK_LT(block1, block2);
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block2_size = bs->cols[row.cells[j].block_id].size;
        CeresMutexLock l(&cell_info->m);
        MatrixTransposeMatrixMultiply
            <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
                values + row.cells[i].position, row.block.size, block1_size,
                values + row.cells[j].position, row.block.size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}
// For a row with an e_block, compute the contribution S += F'F. This
// function has the same structure as NoEBlockRowOuterProduct, except
// that this function uses the template parameters.  The loop starts at
// cell 1 because cell 0 is the e-block being eliminated.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
EBlockRowOuterProduct(const BlockSparseMatrix* A,
                      int row_block_index,
                      BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const CompressedRow& row = bs->rows[row_block_index];
  const double* values = A->values();
  for (int i = 1; i < row.cells.size(); ++i) {
    const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
    DCHECK_GE(block1, 0);
    const int block1_size = bs->cols[row.cells[i].block_id].size;
    int r, c, row_stride, col_stride;
    CellInfo* cell_info = lhs->GetCell(block1, block1,
                                       &r, &c,
                                       &row_stride, &col_stride);
    if (cell_info != NULL) {
      CeresMutexLock l(&cell_info->m);
      // block += b1.transpose() * b1;
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
              values + row.cells[i].position, row.block.size, block1_size,
              values + row.cells[i].position, row.block.size, block1_size,
              cell_info->values, r, c, row_stride, col_stride);
    }
    // Off-diagonal cells: S(block1, block2) += b1' * b2 for block2 > block1.
    for (int j = i + 1; j < row.cells.size(); ++j) {
      const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
      DCHECK_GE(block2, 0);
      DCHECK_LT(block1, block2);
      const int block2_size = bs->cols[row.cells[j].block_id].size;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        // block += b1.transpose() * b2;
        CeresMutexLock l(&cell_info->m);
        MatrixTransposeMatrixMultiply
            <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
                values + row.cells[i].position, row.block.size, block1_size,
                values + row.cells[j].position, row.block.size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
|
DRB094-doall2-ordered-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
In the original benchmark, ordered(2) is used to associate two loops with
omp for (ordered(n) is an OpenMP 4.5 addition).  This transformed variant
instead uses nested "parallel for" directives; in both forms the loop
iteration variables are private, so there is no data race.
*/
#include <stdio.h>
#include <omp.h>
/* 100x100 shared output array.  Each (i, j) iteration writes only
   a[i][j], so the parallel initialization below is race-free. */
int a[100][100];

int main()
{
  int i;
  int j;
  /* Both loop variables are explicitly private; distinct iterations touch
     distinct array elements, hence no data race. */
#pragma omp parallel for private (i,j)
  for (i = 0; i <= 99; i += 1) {
#pragma omp parallel for private (j)
    for (j = 0; j <= 99; j += 1) {
      a[i][j] = i + j;
    }
  }
  /* Sequential pass: bump every element and print the visited indices. */
  for (i = 0; i <= 99; i += 1) {
    for (j = 0; j <= 99; j += 1) {
      a[i][j] = a[i][j] + 1;
      printf("test i=%d j=%d\n",i,j);
    }
  }
  return 0;
}
|
GB_binop__ne_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ne_int8
// A.*B function (eWiseMult): GB_AemultB__ne_int8
// A*D function (colscale): GB_AxD__ne_int8
// D*A function (rowscale): GB_DxB__ne_int8
// C+=B function (dense accum): GB_Cdense_accumB__ne_int8
// C+=b function (dense accum): GB_Cdense_accumb__ne_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_int8
// C=scalar+B GB_bind1st__ne_int8
// C=scalar+B' GB_bind1st_tran__ne_int8
// C=A+scalar GB_bind2nd__ne_int8
// C=A'+scalar GB_bind2nd_tran__ne_int8
// C type: bool
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT8 || GxB_NO_NE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Applies the NE operator entrywise to two dense int8 matrices, writing the
// boolean result into the dense matrix C via the shared template.  Returns
// GrB_NO_VALUE when this operator/type combination is compiled out.
GrB_Info GB_Cdense_ewise3_noaccum__ne_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// The accumulation template is compiled out (#if 0) for this operator —
// presumably because C's type (bool) differs from the inputs' (int8_t), so
// NE cannot accumulate into C; the function then just reports success.
GrB_Info GB_Cdense_accumB__ne_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  For this operator the
// template is compiled out (#if 0 below), so the function is a stub that
// reports success without touching C; the generic path is used instead.
GrB_Info GB_Cdense_accumb__ne_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by the diagonal matrix D, applying z = (x != y)
// via the colscale template.  The ne_int8 operator returns bool, which is
// why C->x is cast to bool* before including the template.
GrB_Info GB_AxD__ne_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has type bool: the output type of the NE comparison
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D, applying z = (x != y)
// via the rowscale template.  As in GB_AxD, C has type bool.
GrB_Info GB_DxB__ne_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has type bool: the output type of the NE comparison
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with z = (x != y) as the "+" operator.
// The *_Mslice/Aslice/Bslice workspace pointers are handed to the template
// and released by GB_FREE_ALL (defined just above this function).
GrB_Info GB_AaddB__ne_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slices; NOTE(review): presumably allocated inside
// GB_add_template.c when needed -- freed below by GB_FREE_ALL
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with z = (x != y) as the ".*" operator.
// Mirrors GB_AaddB above, but only the intersection of the patterns of A
// and B is computed (via GB_emult_template.c).
GrB_Info GB_AemultB__ne_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slices, released by GB_FREE_ALL after the template runs
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]): apply the ne_int8 operator with the scalar x
// bound to the first argument, for every entry present in the bitmap Bb.
// Entries absent from the bitmap are left untouched.
GrB_Info GB_bind1st__ne_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int8_t x = (*((int8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Bb, p))
        {
            Cx [p] = (x != Bx [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y): apply the ne_int8 operator with the scalar y
// bound to the second argument, for every entry present in the bitmap Ab.
// Entries absent from the bitmap are left untouched.
GrB_Info GB_bind2nd__ne_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            Cx [p] = (Ax [p] != y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply z = (x != aij) with the scalar x
// bound to the first argument.  The work is done by GB_unop_transpose.c,
// which reads the GB_CAST_OP macro defined just above this function.
GrB_Info GB_bind1st_tran__ne_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated boilerplate: re-establish GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply z = (aij != y) with the scalar y
// bound to the second argument.  The work is done by GB_unop_transpose.c,
// which reads the GB_CAST_OP macro defined just above this function.
GrB_Info GB_bind2nd_tran__ne_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_int32_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int32_int8
// op(A') function: GB_unop_tran__identity_int32_int8
// C type: int32_t
// A type: int8_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = (int32_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int32_t) Ax [p]: apply the identity operator with an
// int8_t -> int32_t typecast.  When A is bitmap (Ab != NULL) only entries
// whose bitmap flag is set are written; C->b has already been copied.
GrB_Info GB_unop_apply__identity_int32_int8
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is full or sparse: every position holds a live entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (int32_t) Ax [p] ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = (int32_t) Ax [p] ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while typecasting each entry from int8_t
// to int32_t.  All the work is in the included template, driven by the
// GB_CAST_OP macro defined above.
GrB_Info GB_unop_tran__identity_int32_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
residual_based_bdf_displacement_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_BASED_BDF_DISPLACEMENT_SCHEME )
#define KRATOS_RESIDUAL_BASED_BDF_DISPLACEMENT_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/schemes/residual_based_bdf_scheme.h"
#include "includes/variables.h"
#include "includes/checks.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBDFDisplacementScheme
* @ingroup KratosCore
* @brief BDF integration scheme (displacement based)
* @details The \f$ n \f$ order Backward Differentiation Formula (BDF) method is a two step \f$ n \f$ order accurate method.
* Look at the base class for more details
* @see ResidualBasedBDFScheme
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class ResidualBasedBDFDisplacementScheme
    : public ResidualBasedBDFScheme<TSparseSpace, TDenseSpace>
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of ResidualBasedBDFDisplacementScheme
    KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFDisplacementScheme );

    /// Base class definition
    typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
    typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType;
    typedef ResidualBasedBDFScheme<TSparseSpace,TDenseSpace> BDFBaseType;

    /// Data type definition
    typedef typename BDFBaseType::TDataType TDataType;

    /// Matrix type definition
    typedef typename BDFBaseType::TSystemMatrixType TSystemMatrixType;

    /// Vector type definition
    typedef typename BDFBaseType::TSystemVectorType TSystemVectorType;

    /// Local system matrix type definition
    typedef typename BDFBaseType::LocalSystemVectorType LocalSystemVectorType;

    /// Local system vector type definition
    typedef typename BDFBaseType::LocalSystemMatrixType LocalSystemMatrixType;

    /// DoF array type definition
    typedef typename BDFBaseType::DofsArrayType DofsArrayType;

    /// DoF vector type definition
    typedef typename Element::DofsVectorType DofsVectorType;

    /// Nodes containers definition
    typedef ModelPart::NodesContainerType NodesArrayType;

    /// Elements containers definition
    typedef ModelPart::ElementsContainerType ElementsArrayType;

    /// Conditions containers definition
    typedef ModelPart::ConditionsContainerType ConditionsArrayType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Constructor. The BDF method (parameters)
     * @param ThisParameters Parameters with the integration order
     */
    explicit ResidualBasedBDFDisplacementScheme(Parameters ThisParameters)
        : ResidualBasedBDFDisplacementScheme(ThisParameters.Has("integration_order") ? static_cast<std::size_t>(ThisParameters["integration_order"].GetInt()) : 2)
    {
        // Validate default parameters
        Parameters default_parameters = Parameters(R"(
        {
            "integration_order" : 2
        })" );
        ThisParameters.ValidateAndAssignDefaults(default_parameters);
    }

    /**
     * @brief Constructor. The BDF method
     * @param Order The integration order
     * @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and its derivatives
     */
    explicit ResidualBasedBDFDisplacementScheme(const std::size_t Order = 2)
        :BDFBaseType(Order)
    {
    }

    /** Copy Constructor.
     */
    explicit ResidualBasedBDFDisplacementScheme(ResidualBasedBDFDisplacementScheme& rOther)
        :BDFBaseType(rOther)
    {
    }

    /**
     * Clone
     */
    typename BaseType::Pointer Clone() override
    {
        return Kratos::make_shared<ResidualBasedBDFDisplacementScheme>(*this);
    }

    /** Destructor.
     */
    ~ResidualBasedBDFDisplacementScheme() override {}

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Performing the prediction of the solution
     * @details It predicts the solution for the current step x = xold + vold * Dt
     * @param rModelPart The model of the problem to solve
     * @param rDofSet Set of all primary variables
     * @param A LHS matrix
     * @param Dx Incremental update of primary variables
     * @param b RHS Vector
     */
    void Predict(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
        ) override
    {
        KRATOS_TRY;

        ProcessInfo& current_process_info = rModelPart.GetProcessInfo();
        const double delta_time = current_process_info[DELTA_TIME];

        // Updating time derivatives (nodally for efficiency)
        const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );

        #pragma omp parallel for
        for(int i = 0; i < num_nodes; ++i) {
            auto it_node = rModelPart.Nodes().begin() + i;

            //ATTENTION::: the prediction is performed only on free nodes
            const array_1d<double, 3>& dot2un1 = it_node->FastGetSolutionStepValue(ACCELERATION, 1);
            const array_1d<double, 3>& dotun1 = it_node->FastGetSolutionStepValue(VELOCITY, 1);
            const array_1d<double, 3>& un1 = it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
            const array_1d<double, 3>& dot2un0 = it_node->FastGetSolutionStepValue(ACCELERATION);
            array_1d<double, 3>& dotun0 = it_node->FastGetSolutionStepValue(VELOCITY);
            array_1d<double, 3>& un0 = it_node->FastGetSolutionStepValue(DISPLACEMENT);

            // X component: if the acceleration (resp. velocity) DoF is fixed,
            // back-compute velocity/displacement from the BDF relation;
            // otherwise, for a free displacement, use the explicit predictor
            // u = u_n + dt*v_n + 0.5*dt^2*a_n
            if (it_node->HasDofFor(ACCELERATION_X)) {
                if (it_node->IsFixed(ACCELERATION_X)) {
                    dotun0[0] = (dot2un0[0] - BDFBaseType::mBDF[1] * dotun1[0])/BDFBaseType::mBDF[0];
                    un0[0] = (dotun0[0] - BDFBaseType::mBDF[1] * un1[0])/BDFBaseType::mBDF[0];
                }
            } else if (it_node->HasDofFor(VELOCITY_X)) {
                if (it_node->IsFixed(VELOCITY_X)) {
                    un0[0] = (dotun1[0] - BDFBaseType::mBDF[1] * un1[0])/BDFBaseType::mBDF[0];
                }
            } else if (it_node->IsFixed(DISPLACEMENT_X) == false) {
                un0[0] = un1[0] + delta_time * dotun1[0] + 0.5 * std::pow(delta_time, 2) * dot2un1[0];
            }

            // Y component (same logic as X)
            if (it_node->HasDofFor(ACCELERATION_Y)) {
                if (it_node->IsFixed(ACCELERATION_Y)) {
                    dotun0[1] = (dot2un0[1] - BDFBaseType::mBDF[1] * dotun1[1])/BDFBaseType::mBDF[0];
                    un0[1] = (dotun0[1] - BDFBaseType::mBDF[1] * un1[1])/BDFBaseType::mBDF[0];
                }
            } else if (it_node->HasDofFor(VELOCITY_Y)) {
                if (it_node->IsFixed(VELOCITY_Y)) {
                    un0[1] = (dotun1[1] - BDFBaseType::mBDF[1] * un1[1])/BDFBaseType::mBDF[0];
                }
            } else if (it_node->IsFixed(DISPLACEMENT_Y) == false) {
                un0[1] = un1[1] + delta_time * dotun1[1] + 0.5 * std::pow(delta_time, 2) * dot2un1[1];
            }

            // Z component, for 3D cases
            if (it_node->HasDofFor(DISPLACEMENT_Z)) {
                if (it_node->HasDofFor(ACCELERATION_Z)) {
                    if (it_node->IsFixed(ACCELERATION_Z)) {
                        dotun0[2] = (dot2un0[2] - BDFBaseType::mBDF[1] * dotun1[2])/BDFBaseType::mBDF[0];
                        un0[2] = (dotun0[2] - BDFBaseType::mBDF[1] * un1[2])/BDFBaseType::mBDF[0];
                    }
                } else if (it_node->HasDofFor(VELOCITY_Z)) { // BUGFIX: was VELOCITY_Y
                    if (it_node->IsFixed(VELOCITY_Z)) {      // BUGFIX: was VELOCITY_Y
                        un0[2] = (dotun1[2] - BDFBaseType::mBDF[1] * un1[2])/BDFBaseType::mBDF[0];
                    }
                } else if (it_node->IsFixed(DISPLACEMENT_Z) == false) {
                    un0[2] = un1[2] + delta_time * dotun1[2] + 0.5 * std::pow(delta_time, 2) * dot2un1[2];
                }
            }

            // Contributions of the older history steps (orders >= 2) to the
            // back-computed values of the fixed-DoF branches above
            for (std::size_t i_order = 2; i_order < BDFBaseType::mOrder + 1; ++i_order) {
                const array_1d<double, 3>& dotun = it_node->FastGetSolutionStepValue(VELOCITY, i_order);
                const array_1d<double, 3>& un = it_node->FastGetSolutionStepValue(DISPLACEMENT, i_order);

                // X component
                if (it_node->HasDofFor(ACCELERATION_X)) {
                    if (it_node->IsFixed(ACCELERATION_X)) {
                        dotun0[0] -= (BDFBaseType::mBDF[i_order] * dotun[0])/BDFBaseType::mBDF[0];
                        un0[0] -= (BDFBaseType::mBDF[i_order] * un[0])/BDFBaseType::mBDF[0];
                    }
                } else if (it_node->HasDofFor(VELOCITY_X)) {
                    if (it_node->IsFixed(VELOCITY_X)) {
                        un0[0] -= (BDFBaseType::mBDF[i_order] * un[0])/BDFBaseType::mBDF[0];
                    }
                }

                // Y component
                if (it_node->HasDofFor(ACCELERATION_Y)) {
                    if (it_node->IsFixed(ACCELERATION_Y)) {
                        dotun0[1] -= (BDFBaseType::mBDF[i_order] * dotun[1])/BDFBaseType::mBDF[0];
                        un0[1] -= (BDFBaseType::mBDF[i_order] * un[1])/BDFBaseType::mBDF[0];
                    }
                } else if (it_node->HasDofFor(VELOCITY_Y)) {
                    if (it_node->IsFixed(VELOCITY_Y)) { // BUGFIX: was VELOCITY_X
                        un0[1] -= (BDFBaseType::mBDF[i_order] * un[1])/BDFBaseType::mBDF[0];
                    }
                }

                // Z component, for 3D cases
                // BUGFIX: this branch previously tested VELOCITY_Y/VELOCITY_X
                // and wrote to component [1] instead of [2]
                if (it_node->HasDofFor(DISPLACEMENT_Z)) {
                    if (it_node->HasDofFor(ACCELERATION_Z)) {
                        if (it_node->IsFixed(ACCELERATION_Z)) {
                            dotun0[2] -= (BDFBaseType::mBDF[i_order] * dotun[2])/BDFBaseType::mBDF[0];
                            un0[2] -= (BDFBaseType::mBDF[i_order] * un[2])/BDFBaseType::mBDF[0];
                        }
                    } else if (it_node->HasDofFor(VELOCITY_Z)) {
                        if (it_node->IsFixed(VELOCITY_Z)) {
                            un0[2] -= (BDFBaseType::mBDF[i_order] * un[2])/BDFBaseType::mBDF[0];
                        }
                    }
                }
            }

            // Updating time derivatives
            UpdateFirstDerivative(it_node);
            UpdateSecondDerivative(it_node);
        }

        KRATOS_CATCH( "" );
    }

    /**
     * @brief This function is designed to be called once to perform all the checks needed
     * on the input provided.
     * @details Checks can be "expensive" as the function is designed
     * to catch user's errors.
     * @param rModelPart The model of the problem to solve
     * @return Zero means all ok
     */
    int Check(ModelPart& rModelPart) override
    {
        KRATOS_TRY;

        const int err = BDFBaseType::Check(rModelPart);
        if(err!=0) return err;

        // Check for variables keys
        // Verify that the variables are correctly initialized
        KRATOS_CHECK_VARIABLE_KEY(DISPLACEMENT)
        KRATOS_CHECK_VARIABLE_KEY(VELOCITY)
        KRATOS_CHECK_VARIABLE_KEY(ACCELERATION)

        // Check that variables are correctly allocated
        for(auto& rnode : rModelPart.Nodes()) {
            KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DISPLACEMENT,rnode)
            KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(VELOCITY,rnode)
            KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(ACCELERATION,rnode)

            KRATOS_CHECK_DOF_IN_NODE(DISPLACEMENT_X, rnode)
            KRATOS_CHECK_DOF_IN_NODE(DISPLACEMENT_Y, rnode)
            KRATOS_CHECK_DOF_IN_NODE(DISPLACEMENT_Z, rnode)
        }

        KRATOS_CATCH( "" );

        return 0;
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ResidualBasedBDFDisplacementScheme";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

protected:

    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief Updating first time derivative (velocity)
     * @details Applies the BDF formula v = sum_i mBDF[i] * u_{n-i}
     * @param itNode the node iterator
     */
    inline void UpdateFirstDerivative(NodesArrayType::iterator itNode) override
    {
        array_1d<double, 3>& dotun0 = itNode->FastGetSolutionStepValue(VELOCITY);
        noalias(dotun0) = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(DISPLACEMENT);
        for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
            noalias(dotun0) += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(DISPLACEMENT, i_order);
    }

    /**
     * @brief Updating second time derivative (acceleration)
     * @details Applies the BDF formula a = sum_i mBDF[i] * v_{n-i}
     * @param itNode the node iterator
     */
    inline void UpdateSecondDerivative(NodesArrayType::iterator itNode) override
    {
        array_1d<double, 3>& dot2un0 = itNode->FastGetSolutionStepValue(ACCELERATION);
        noalias(dot2un0) = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(VELOCITY);
        for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
            noalias(dot2un0) += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(VELOCITY, i_order);
    }

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:

    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Serialization
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; /* Class ResidualBasedBDFDisplacementScheme */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BDF_DISPLACEMENT_SCHEME defined */
|
GB_convert_sparse_to_bitmap_template.c | //------------------------------------------------------------------------------
// GB_convert_sparse_to_bitmap_template: convert A from sparse to bitmap
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Template body: scatter the entries of sparse matrix A into a bitmap.
// For each live entry A(i,j) stored at position p, its value is copied to
// position i + j*avlen of the new value array (via GB_COPY) and the bitmap
// flag Ab is set there.  The caller supplies the task decomposition in
// kfirst_Aslice / klast_Aslice / pstart_Aslice, one column range per task.
{
#if defined ( GB_ATYPE )
const GB_ATYPE *restrict Axold = (GB_ATYPE *) A->x ;
GB_ATYPE *restrict Axnew = (GB_ATYPE *) Ax_new ;
#endif
int tid ;
#pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < A_ntasks ; tid++)
{
int64_t kfirst = kfirst_Aslice [tid] ;
int64_t klast = klast_Aslice [tid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// find the part of A(:,j) to be operated on by this task
//------------------------------------------------------------------
int64_t j = GBH (Ah, k) ;
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, tid, k,
kfirst, klast, pstart_Aslice, Ap, avlen) ;
// the start of A(:,j) in the new bitmap
int64_t pA_new = j * avlen ;
//------------------------------------------------------------------
// convert A(:,j) from sparse to bitmap
//------------------------------------------------------------------
if (nzombies == 0)
{
// fast path: no zombies, so every stored entry is live
for (int64_t p = pA_start ; p < pA_end ; p++)
{
// A(i,j) has index i, value Axold [p]
int64_t i = Ai [p] ;
int64_t pnew = i + pA_new ;
// move A(i,j) to its new place in the bitmap
// Axnew [pnew] = Axold [p]
GB_COPY (Axnew, pnew, Axold, p) ;
Ab [pnew] = 1 ;
}
}
else
{
// entries whose index is flagged as a zombie are skipped
for (int64_t p = pA_start ; p < pA_end ; p++)
{
// A(i,j) has index i, value Axold [p]
int64_t i = Ai [p] ;
if (!GB_IS_ZOMBIE (i))
{
int64_t pnew = i + pA_new ;
// move A(i,j) to its new place in the bitmap
// Axnew [pnew] = Axold [p]
GB_COPY (Axnew, pnew, Axold, p) ;
Ab [pnew] = 1 ;
}
}
}
}
}
done = true ;
}
#undef GB_ATYPE
|
prog.c | #include <stdio.h>
#include <omp.h>
// Demonstrates OpenMP's master vs. single constructs in one parallel region:
//  - master: executed only by the master thread (thread 0); no implied
//    barrier at the end of the construct.
//  - single: executed by exactly one (implementation-chosen) thread; the
//    other threads wait at the implied barrier at the end of the construct.
void fun()
{
#pragma omp parallel
{
#pragma omp master
{
printf("Thread in master %d\n", omp_get_thread_num());
}
#pragma omp single
{
printf("Thread in single %d\n", omp_get_thread_num());
}
}
}
// Entry point: run the master/single demonstration once.
// Command-line arguments are accepted but unused.
int main(int argc, char **argv)
{
fun();
return 0;
}
|
argmax.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_ARGMAX_H_
#define MACE_KERNELS_ARGMAX_H_
#include <algorithm>
#include <functional>
#include <limits>
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/kernels/kernel.h"
#include "mace/public/mace.h"
#include "mace/utils/utils.h"
namespace mace {
namespace kernels {
template <DeviceType D, typename T>
struct ArgMaxFunctor : OpKernel {
  explicit ArgMaxFunctor(OpKernelContext *context) : OpKernel(context) {}

  /**
   * Computes the index of the maximum element of `input` along its last
   * axis, writing int32 indices to `output`.
   *
   * @param input  source tensor; must not be a scalar
   * @param axis   scalar tensor holding the reduction axis; only the last
   *               axis (or its negative alias) is supported
   * @param output receives the argmax indices; resized to input's shape
   *               with the last dimension removed
   * @param future unused
   */
  MaceStatus operator()(const Tensor *input,
                        const Tensor *axis,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    MACE_CHECK(input->dim_size() > 0, "ArgMax input should not be a scalar");
    MACE_CHECK(axis->dim_size() == 0, "Mace argmax only supports scalar axis");
    Tensor::MappingGuard axis_guard(axis);
    int axis_value = axis->data<int32_t>()[0];
    if (axis_value < 0) {
      // normalize a negative axis to its positive equivalent
      axis_value += input->dim_size();
    }
    MACE_CHECK(axis_value == input->dim_size() - 1,
               "Mace argmax only supports last dimension as axis");

    // output shape = input shape minus the reduced (last) dimension
    std::vector<index_t> output_shape(input->dim_size() - 1);
    for (index_t d = 0; d < input->dim_size() - 1; ++d) {
      output_shape[d] = input->dim(d < axis_value ? d : d + 1);
    }
    MACE_RETURN_IF_ERROR(output->Resize(output_shape));

    Tensor::MappingGuard input_guard(input);
    Tensor::MappingGuard output_guard(output);
    auto input_data = input->data<T>();
    auto output_data = output->mutable_data<int32_t>();

    index_t outer_size = output->size();
    index_t inner_size = input->dim(axis_value);
#pragma omp parallel for
    for (index_t i = 0; i < outer_size; ++i) {
      // fix: use index_t (not int) so the loop index j is not silently
      // narrowed when assigned; the int32 store is made explicit below
      index_t idx = 0;
      T max_value = std::numeric_limits<T>::lowest();
      const T *input_ptr = input_data + i * inner_size;
      for (index_t j = 0; j < inner_size; ++j) {
        if (input_ptr[j] > max_value) {
          max_value = input_ptr[j];
          idx = j;
        }
      }
      output_data[i] = static_cast<int32_t>(idx);
    }
    return MACE_SUCCESS;
  }
};
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_ARGMAX_H_
|
grid_ao.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <math.h>
#include <complex.h>
#include "config.h"
#include "cint.h"
#include "vhf/fblas.h"
#include "gto/grid_ao_drv.h"
#include "np_helper/np_helper.h"
#define ALL_IMAGES 255
#define IMGBLK 40
#define OF_CMPLX 2
double CINTcommon_fac_sp(int l);
void GTOshell_eval_grid_cart(double *gto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv1(double *gto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv2(double *cgto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv3(double *cgto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv4(double *cgto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart(double *gto, double *ri, double *exps,
double *coord, double *alpha, double *coeff,
double *env, int l, int np, int nc,
size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_ip_cart(double *gto, double *ri, double *exps,
double *coord, double *alpha, double *coeff,
double *env, int l, int np, int nc,
size_t nao, size_t ngrids, size_t bgrids);
/*
* Extend the meaning of non0table: given shell ID and block ID,
* non0table is the number of images in Ls that does not vanish.
* Ls should be sorted based on the distance to center cell.
*/
/* Fill non0table: for each shell (bas_id) and each block of BLKSIZE grid
 * points (ib), record how many lattice images in Ls give a non-negligible
 * value of the shell on that block, capped at ALL_IMAGES.  Images are
 * scanned from the farthest (m = nimgs-1) downward; the first image found
 * with any primitive satisfying maxc*exp(-a*rr) > exp(-EXPCUTOFF) fixes the
 * count at m+1.  A block where every image vanishes gets 0.
 * NOTE(review): this relies on Ls being sorted by distance to the central
 * cell, as stated in the comment above -- confirm at the call sites. */
void PBCnr_ao_screen(unsigned char *non0table, double *coords, int ngrids,
double *Ls, int nimgs,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
#pragma omp parallel
{
int i, j, m;
int np, nc, atm_id;
size_t bas_id, ib;
double rr, arr, maxc;
double logcoeff[NPRIMAX];   /* log of largest |contraction coeff| per primitive */
double dr[3];
double rL[3];
double *p_exp, *pcoeff, *ratm;
#pragma omp for nowait schedule(dynamic)
for (bas_id = 0; bas_id < nbas; bas_id++) {
np = bas[NPRIM_OF+bas_id*BAS_SLOTS];
nc = bas[NCTR_OF +bas_id*BAS_SLOTS];
p_exp = env + bas[PTR_EXP+bas_id*BAS_SLOTS];
pcoeff = env + bas[PTR_COEFF+bas_id*BAS_SLOTS];
atm_id = bas[ATOM_OF+bas_id*BAS_SLOTS];
ratm = env + atm[atm_id*ATM_SLOTS+PTR_COORD];
/* screening threshold per primitive: largest |coeff| over contractions */
for (j = 0; j < np; j++) {
maxc = 0;
for (i = 0; i < nc; i++) {
maxc = MAX(maxc, fabs(pcoeff[i*np+j]));
}
logcoeff[j] = log(maxc);
}
for (ib = 0; ib < nblk; ib++) {
/* scan images from farthest to nearest: the first (largest m)
 * non-vanishing image determines the image count for this block */
for (m = nimgs-1; m >= 0; m--) {
rL[0] = ratm[0] + Ls[m*3+0];
rL[1] = ratm[1] + Ls[m*3+1];
rL[2] = ratm[2] + Ls[m*3+2];
for (i = ib*BLKSIZE; i < MIN(ngrids, (ib+1)*BLKSIZE); i++) {
dr[0] = coords[0*ngrids+i] - rL[0];
dr[1] = coords[1*ngrids+i] - rL[1];
dr[2] = coords[2*ngrids+i] - rL[2];
rr = dr[0]*dr[0] + dr[1]*dr[1] + dr[2]*dr[2];
for (j = 0; j < np; j++) {
arr = p_exp[j] * rr;
/* a*rr - log(maxc) < EXPCUTOFF  <=>  value above cutoff */
if (arr-logcoeff[j] < EXPCUTOFF) {
non0table[ib*nbas+bas_id] = MIN(ALL_IMAGES, m+1);
goto next_blk;
}
}
}
}
non0table[ib*nbas+bas_id] = 0;
next_blk:;
}
}
}
}
/* Interleave the separately stored real and imaginary planes of ao_k into
 * the complex output array.  ao_k holds, for each k-point, a real block
 * followed by an imaginary block of ncomp*ncol*bgrids doubles; the result
 * for (k-point, component) pairs is written with row stride ngrids. */
static void _copy(double complex *out, double *ao_k,
                  size_t ngrids, size_t bgrids,
                  int nkpts, int ncomp, int nao, int ncol)
{
        size_t blksize = ncomp * ncol * bgrids;
        int kp, ic, col, g;
        for (kp = 0; kp < nkpts; kp++) {
                double *re = ao_k + (2*kp  ) * blksize;
                double *im = ao_k + (2*kp+1) * blksize;
                for (ic = 0; ic < ncomp; ic++) {
                        double complex *dst = out + (kp * ncomp + ic) * nao * ngrids;
                        for (col = 0; col < ncol; col++) {
                                for (g = 0; g < bgrids; g++) {
                                        dst[col*ngrids+g] = re[col*bgrids+g]
                                                + im[col*bgrids+g] * _Complex_I;
                                }
                        }
                        re += ncol * bgrids;
                        im += ncol * bgrids;
                }
        }
}
// grid2atm[nimgs,xyz,grid_id]
/* grid2atm[nimgs,xyz,grid_id]: displacement of each grid point from the
 * atom translated by each lattice image.  min_grid2atm[m] receives the
 * minimum grid-atom distance for image m.  Images with m >= atm_imag_max
 * are skipped entirely, unless atm_imag_max == ALL_IMAGES. */
static void _fill_grid2atm(double *grid2atm, double *min_grid2atm,
                           double *coord, double *Ls, double *r_atm,
                           int atm_imag_max, size_t bgrids, size_t ngrids, int nimgs)
{
        size_t ig;
        int m;
        for (m = 0; m < nimgs; m++) {
                if (m < atm_imag_max || atm_imag_max == ALL_IMAGES) {
                        double ax = r_atm[0] + Ls[m*3+0];
                        double ay = r_atm[1] + Ls[m*3+1];
                        double az = r_atm[2] + Ls[m*3+2];
                        double rr_min = 1e9;
                        for (ig = 0; ig < bgrids; ig++) {
                                double dx = coord[0*ngrids+ig] - ax;
                                double dy = coord[1*ngrids+ig] - ay;
                                double dz = coord[2*ngrids+ig] - az;
                                grid2atm[0*BLKSIZE+ig] = dx;
                                grid2atm[1*BLKSIZE+ig] = dy;
                                grid2atm[2*BLKSIZE+ig] = dz;
                                rr_min = MIN(dx*dx + dy*dy + dz*dz, rr_min);
                        }
                        min_grid2atm[m] = sqrt(rr_min);
                }
                grid2atm += 3*BLKSIZE;
        }
}
/* Evaluate periodic cartesian GTOs on one block of grid points for the
 * shells in shls_slice, accumulating the lattice sum weighted by the
 * k-point phase factors expLk.  Results land in ao at offset offao.
 *
 * buf is caller-provided scratch; its layout must match the size computed
 * in PBCeval_loop:
 *   grid2atm     [nimgs,3,BLKSIZE]              atom-to-grid displacements
 *   eprim        [2,NPRIMAX,BLKSIZE]            screened exponentials
 *   aobuf        [IMGBLK,ncomp,di_max,bgrids]   per-image AO values
 *   aobufk       [nkpts,2,ncomp,di_max,bgrids]  k-resolved accumulator
 *   Lk_buf       [IMGBLK,nkpts,2]               compacted phase factors
 *   min_grid2atm [nimgs]                        min grid-atom distance
 *
 * Fixes vs the previous revision (and now consistent with
 * PBCeval_sph_iter): the aobufk zeroing loop and the aobuf offset are done
 * in size_t, since nkpts2*dimc and count*dimc can overflow int for large
 * systems.
 */
void PBCeval_cart_iter(FPtr_eval feval, FPtr_exp fexp,
                       size_t nao, size_t ngrids, size_t bgrids, size_t offao,
                       int param[], int *shls_slice, int *ao_loc, double *buf,
                       double *Ls, double complex *expLk,
                       int nimgs, int nkpts, int di_max, double complex *ao,
                       double *coord, double *rcut, unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nkpts2 = nkpts * OF_CMPLX;
        int i, j, k, l, np, nc, atm_id, bas_id, deg, ao_id;
        int iL, iL0, iLcount, dimc;
        int grid2atm_atm_id, count;
        size_t ib;
        double fac;
        double *p_exp, *pcoeff, *pcoord, *pao, *ri;
        double *grid2atm = buf; // shape [nimgs,3,bgrids]
        double *eprim = grid2atm + nimgs*3*BLKSIZE;
        double *aobuf = eprim + NPRIMAX*BLKSIZE*2;
        double *aobufk = aobuf + IMGBLK*ncomp*di_max*bgrids;
        double *Lk_buf = aobufk + nkpts*ncomp*di_max*bgrids * OF_CMPLX;
        double complex *zLk_buf = (double complex *)Lk_buf;
        double *min_grid2atm = Lk_buf + IMGBLK * nkpts * OF_CMPLX;
        double *pexpLk;
        int img_idx[nimgs];
        int atm_imag_max[natm];
        // Per atom: the largest image count any of its shells requires.
        for (i = 0; i < natm; i++) {
                atm_imag_max[i] = 0;
        }
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                atm_imag_max[atm_id] = MAX(atm_imag_max[atm_id], non0table[bas_id]);
        }
        grid2atm_atm_id = -1;
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                l = bas[bas_id*BAS_SLOTS+ANG_OF ];
                deg = (l+1)*(l+2)/2;  // cartesian components for angular momentum l
                dimc = nc*deg * ncomp * bgrids;
                fac = CINTcommon_fac_sp(l);
                p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                ao_id = ao_loc[bas_id] - ao_loc[sh0];
                // Consecutive shells on the same atom share displacements;
                // recompute only when the atom changes.
                if (grid2atm_atm_id != atm_id) {
                        _fill_grid2atm(grid2atm, min_grid2atm, coord, Ls, ri,
                                       atm_imag_max[atm_id], bgrids, ngrids, nimgs);
                        grid2atm_atm_id = atm_id;
                }
                // Zero the k-resolved accumulator (size_t count: see header).
                for (ib = 0; ib < (size_t)nkpts2 * dimc; ib++) {
                        aobufk[ib] = 0;
                }
                for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
                        iLcount = MIN(IMGBLK, nimgs - iL0);
                        count = 0;
                        for (iL = iL0; iL < iL0+iLcount; iL++) {
                                pcoord = grid2atm + iL * 3*BLKSIZE;
                                // An image survives the per-shell image count,
                                // the distance cutoff, and fexp's screening.
                                if ((iL < non0table[bas_id] || non0table[bas_id] == ALL_IMAGES) &&
                                    (min_grid2atm[iL] < rcut[bas_id]) &&
                                    (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac)) {
                                        pao = aobuf + (size_t)count * dimc;
                                        (*feval)(pao, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                 l, np, nc, nc*deg, bgrids, bgrids);
                                        img_idx[count] = iL;
                                        count += 1;
                                }
                        }
                        if (count > 0) {
                                if (img_idx[count-1] != iL0 + count-1) {
                                        // some images are skipped: compact the
                                        // phase factors of surviving images
                                        for (i = 0; i < count; i++) {
                                                j = img_idx[i];
                                                for (k = 0; k < nkpts; k++) {
                                                        zLk_buf[i*nkpts+k] = expLk[j*nkpts+k];
                                                }
                                        }
                                        pexpLk = Lk_buf;
                                } else {
                                        pexpLk = (double *)(expLk + nkpts * iL0);
                                }
                                // aobufk += aobuf * pexpLk^T: lattice sum with
                                // re/im interleaved in the nkpts2 dimension.
                                dgemm_(&TRANS_N, &TRANS_T, &dimc, &nkpts2, &count,
                                       &D1, aobuf, &dimc, pexpLk, &nkpts2, &D1, aobufk, &dimc);
                        }
                }
                _copy(ao+ao_id*ngrids+offao, aobufk,
                      ngrids, bgrids, nkpts, ncomp, nao, nc*deg);
        }
}
/* Evaluate periodic real spherical-harmonic GTOs on one block of grid
 * points.  Same structure as PBCeval_cart_iter, plus a cartesian ->
 * spherical transform (CINTc2s_ket_sph1) for l > 1; the extra cart_gto
 * scratch buffer holds the intermediate cartesian values. */
void PBCeval_sph_iter(FPtr_eval feval, FPtr_exp fexp,
size_t nao, size_t ngrids, size_t bgrids, size_t offao,
int param[], int *shls_slice, int *ao_loc, double *buf,
double *Ls, double complex *expLk,
int nimgs, int nkpts, int di_max, double complex *ao,
double *coord, double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ncomp = param[TENSOR];
const int sh0 = shls_slice[0];
const int sh1 = shls_slice[1];
const char TRANS_N = 'N';
const char TRANS_T = 'T';
const double D1 = 1;
const int nkpts2 = nkpts * OF_CMPLX;
int i, j, k, l, np, nc, atm_id, bas_id, deg, dcart, ao_id;
int iL, iL0, iLcount, dimc;
int grid2atm_atm_id, count;
double fac;
double *p_exp, *pcoeff, *pcoord, *pcart, *pao, *ri;
double *grid2atm = buf; // shape [nimgs,3,bgrids]
double *eprim = grid2atm + nimgs*3*BLKSIZE;
double *aobuf = eprim + NPRIMAX*BLKSIZE*2;
double *aobufk = aobuf + IMGBLK*ncomp*di_max*bgrids;
double *Lk_buf = aobufk + nkpts*ncomp*di_max*bgrids * OF_CMPLX;
double complex *zLk_buf = (double complex *)Lk_buf;
double *cart_gto = Lk_buf + IMGBLK * nkpts * OF_CMPLX;
double *min_grid2atm = cart_gto + ncomp*NCTR_CART*bgrids;
double *pexpLk;
int img_idx[nimgs];
int atm_imag_max[natm];
/* per atom: largest image count any of its shells requires */
for (i = 0; i < natm; i++) {
atm_imag_max[i] = 0;
}
for (bas_id = sh0; bas_id < sh1; bas_id++) {
atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
atm_imag_max[atm_id] = MAX(atm_imag_max[atm_id], non0table[bas_id]);
}
grid2atm_atm_id = -1;
for (bas_id = sh0; bas_id < sh1; bas_id++) {
np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
l = bas[bas_id*BAS_SLOTS+ANG_OF ];
/* deg spherical components vs dcart cartesian components */
deg = l * 2 + 1;
dcart = (l+1)*(l+2)/2;
dimc = nc*deg * ncomp * bgrids;
fac = CINTcommon_fac_sp(l);
p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
ao_id = ao_loc[bas_id] - ao_loc[sh0];
/* displacements are shared by consecutive shells on the same atom */
if (grid2atm_atm_id != atm_id) {
_fill_grid2atm(grid2atm, min_grid2atm, coord, Ls, ri,
atm_imag_max[atm_id], bgrids, ngrids, nimgs);
grid2atm_atm_id = atm_id;
}
NPdset0(aobufk, ((size_t)nkpts2) * dimc);
for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
iLcount = MIN(IMGBLK, nimgs - iL0);
count = 0;
for (iL = iL0; iL < iL0+iLcount; iL++) {
pcoord = grid2atm + iL * 3*BLKSIZE;
/* keep an image only if it passes the per-shell image count,
 * the distance cutoff, and fexp's exponential screening */
if ((iL < non0table[bas_id] || non0table[bas_id] == ALL_IMAGES) &&
(min_grid2atm[iL] < rcut[bas_id]) &&
(*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac)) {
pao = aobuf + ((size_t)count) * dimc;
if (l <= 1) { // s, p functions
/* cartesian == spherical up to l = 1; evaluate in place */
(*feval)(pao, ri, eprim, pcoord, p_exp, pcoeff, env,
l, np, nc, nc*dcart, bgrids, bgrids);
} else {
/* evaluate cartesian, then transform each component block */
(*feval)(cart_gto, ri, eprim, pcoord, p_exp, pcoeff, env,
l, np, nc, nc*dcart, bgrids, bgrids);
pcart = cart_gto;
for (i = 0; i < ncomp * nc; i++) {
CINTc2s_ket_sph1(pao, pcart, bgrids, bgrids, l);
pao += deg * bgrids;
pcart += dcart * bgrids;
}
}
img_idx[count] = iL;
count++;
}
}
if (count > 0) {
if (img_idx[count-1] != iL0 + count-1) {
// some images are skipped: compact the surviving phase factors
for (i = 0; i < count; i++) {
j = img_idx[i];
for (k = 0; k < nkpts; k++) {
zLk_buf[i*nkpts+k] = expLk[j*nkpts+k];
}
}
pexpLk = Lk_buf;
} else {
pexpLk = (double *)(expLk + nkpts * iL0);
}
/* aobufk += aobuf * pexpLk^T: lattice sum with re/im interleaved */
dgemm_(&TRANS_N, &TRANS_T, &dimc, &nkpts2, &count,
&D1, aobuf, &dimc, pexpLk, &nkpts2, &D1, aobufk, &dimc);
}
}
_copy(ao+ao_id*ngrids+offao, aobufk,
ngrids, bgrids, nkpts, ncomp, nao, nc*deg);
}
}
int GTOshloc_by_atom(int *shloc, int *shls_slice, int *ao_loc, int *atm, int *bas);
/*
 * blksize <= 1024 to avoid stack overflow
 *
 * non0table[ngrids/blksize,nbas] stores, for each grid block and each shell,
 * the number of lattice images with non-negligible ao values (0 means the
 * shell is fully screened for that block); it is used to skip ao evaluation.
 */
/* Parallel driver: split the grid into BLKSIZE blocks and the shells into
 * per-atom groups (GTOshloc_by_atom), then run fiter (the cart or sph
 * iterator) on each (shell-group, grid-block) pair.  Each thread owns one
 * scratch buffer whose layout must match what the iterators expect. */
void PBCeval_loop(void (*fiter)(), FPtr_eval feval, FPtr_exp fexp,
int ngrids, int param[], int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
/* shloc[i] = first shell of atom group i; shloc[nshblk] = end sentinel */
int shloc[shls_slice[1]-shls_slice[0]+1];
const int nshblk = GTOshloc_by_atom(shloc, shls_slice, ao_loc, atm, bas);
const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
const size_t Ngrids = ngrids;
int i;
/* widest shell in the slice, used to size the per-thread buffers */
int di_max = 0;
for (i = shls_slice[0]; i < shls_slice[1]; i++) {
di_max = MAX(di_max, ao_loc[i+1] - ao_loc[i]);
}
#pragma omp parallel
{
const int sh0 = shls_slice[0];
const int sh1 = shls_slice[1];
const size_t nao = ao_loc[sh1] - ao_loc[sh0];
int ip, ib, k, iloc, ish;
size_t aoff, bgrids;
/* must cover grid2atm + eprim + aobufk + aobuf + cart_gto scratch
 * (in units of BLKSIZE), plus Lk_buf and min_grid2atm -- keep in
 * sync with PBCeval_cart_iter/PBCeval_sph_iter pointer arithmetic */
size_t bufsize =((nimgs*3 + NPRIMAX*2 +
nkpts *param[POS_E1]*param[TENSOR]*di_max * OF_CMPLX +
IMGBLK*param[POS_E1]*param[TENSOR]*di_max +
param[POS_E1]*param[TENSOR]*NCTR_CART) * BLKSIZE
+ nkpts * IMGBLK * OF_CMPLX + nimgs);
/* NOTE(review): malloc result is not checked before use */
double *buf = malloc(sizeof(double) * bufsize);
#pragma omp for nowait schedule(dynamic, 1)
for (k = 0; k < nblk*nshblk; k++) {
/* decode the flattened (shell-group, grid-block) index */
iloc = k / nblk;
ish = shloc[iloc];
ib = k - iloc * nblk;
ip = ib * BLKSIZE;
aoff = (ao_loc[ish] - ao_loc[sh0]) * Ngrids + ip;
bgrids = MIN(ngrids-ip, BLKSIZE);
(*fiter)(feval, fexp, nao, Ngrids, bgrids, aoff,
param, shloc+iloc, ao_loc, buf,
Ls, expLk, nimgs, nkpts, di_max,
ao, coord+ip, rcut, non0table+ib*nbas,
atm, natm, bas, nbas, env);
}
free(buf);
}
}
/* Driver: evaluate periodic cartesian GTOs over all grid blocks by
 * dispatching PBCeval_cart_iter through PBCeval_loop. */
void PBCeval_cart_drv(FPtr_eval feval, FPtr_exp fexp,
int ngrids, int param[], int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
PBCeval_loop(PBCeval_cart_iter, feval, fexp,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Driver: evaluate periodic spherical GTOs over all grid blocks by
 * dispatching PBCeval_sph_iter through PBCeval_loop. */
void PBCeval_sph_drv(FPtr_eval feval, FPtr_exp fexp,
int ngrids, int param[], int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
PBCeval_loop(PBCeval_sph_iter, feval, fexp,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/*
 * Exported entry points.  Each wrapper fixes param = {1, ncomp} (slot 0 is
 * read as param[POS_E1], slot 1 as param[TENSOR] = number of returned
 * components) and dispatches to the cart/sph driver with the matching
 * evaluator and screening function.  Note the sph wrappers also pass the
 * *cartesian* evaluators: PBCeval_sph_iter performs the cartesian ->
 * spherical transform itself.
 */
/* AO values only: 1 component */
void PBCGTOval_cart_deriv0(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int param[] = {1, 1};
PBCeval_cart_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* AO values only: 1 component */
void PBCGTOval_sph_deriv0(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int param[] = {1, 1};
PBCeval_sph_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* value + first derivatives: 4 components */
void PBCGTOval_cart_deriv1(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int param[] = {1, 4};
PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv1, GTOcontract_exp1,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* value + first derivatives: 4 components */
void PBCGTOval_sph_deriv1(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int param[] = {1, 4};
PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv1, GTOcontract_exp1,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* value + derivatives up to 2nd order: 10 components */
void PBCGTOval_cart_deriv2(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int param[] = {1, 10};
PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv2, GTOprim_exp,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* value + derivatives up to 2nd order: 10 components */
void PBCGTOval_sph_deriv2(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int param[] = {1, 10};
PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv2, GTOprim_exp,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* value + derivatives up to 3rd order: 20 components */
void PBCGTOval_cart_deriv3(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int param[] = {1, 20};
PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv3, GTOprim_exp,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* value + derivatives up to 3rd order: 20 components */
void PBCGTOval_sph_deriv3(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int param[] = {1, 20};
PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv3, GTOprim_exp,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* value + derivatives up to 4th order: 35 components */
void PBCGTOval_cart_deriv4(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int param[] = {1, 35};
PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv4, GTOprim_exp,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* value + derivatives up to 4th order: 35 components */
void PBCGTOval_sph_deriv4(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int param[] = {1, 35};
PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv4, GTOprim_exp,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* alias for PBCGTOval_cart_deriv0 */
void PBCGTOval_cart(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
// int param[] = {1, 1};
// PBCeval_cart_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
// ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
// ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
PBCGTOval_cart_deriv0(ngrids, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* alias for PBCGTOval_sph_deriv0 */
void PBCGTOval_sph(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
// int param[] = {1, 1};
// PBCeval_sph_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
// ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
// ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
PBCGTOval_sph_deriv0(ngrids, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* first derivatives only: 3 components */
void PBCGTOval_ip_cart(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int param[] = {1, 3};
PBCeval_cart_drv(GTOshell_eval_grid_ip_cart, GTOcontract_exp1,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* first derivatives only: 3 components */
void PBCGTOval_ip_sph(int ngrids, int *shls_slice, int *ao_loc,
double *Ls, int nimgs, double complex *expLk, int nkpts,
double complex *ao, double *coord,
double *rcut, unsigned char *non0table,
int *atm, int natm, int *bas, int nbas, double *env)
{
int param[] = {1, 3};
PBCeval_sph_drv(GTOshell_eval_grid_ip_cart, GTOcontract_exp1,
ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
|
omp_target_config.h | #ifndef OMP_TARGET_CONFIG_H_
#define OMP_TARGET_CONFIG_H_
#include <stdlib.h>   /* malloc/free were used without this include */
/* Run a tiny offloaded vector addition so that later timed target regions
 * do not pay the one-time device/runtime startup cost.  The result in z is
 * intentionally discarded.
 *
 * Fixes vs the previous revision: x, y and z were leaked, the loop bound
 * duplicated the literal 8 instead of using n, and <stdlib.h> was missing. */
static void warm_up() {
        int i;
        const int n = 8;
        int *x = (int *) malloc(n * sizeof(int));
        int *y = (int *) malloc(n * sizeof(int));
        int *z = (int *) malloc(n * sizeof(int));
        if (x == NULL || y == NULL || z == NULL) {
                free(x); free(y); free(z);
                return;   /* nothing to warm up if allocation failed */
        }
        for (i = 0; i != n; i++) { x[i] = 1; y[i] = 1; z[i] = 0; }
#pragma omp target map(from:z[0:n]) map(to:y[0:n],x[0:n])
#pragma omp parallel for
        for (i = 0; i < n; ++i) z[i] = x[i] + y[i];
        free(x);
        free(y);
        free(z);
}
#endif
|
pi-v14.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
#if _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#endif
/*
 * Integrate 4/(1+x^2) over [0,1] with the midpoint rule, splitting the
 * range into two explicit OpenMP tasks.
 *
 * Fix over the previous revision (marked "WARNING : incorrect code"): the
 * task directives were encountered by EVERY thread of the parallel team,
 * so each half-range was summed once per thread and the result was
 * num_threads * pi.  The tasks are now created inside a single construct,
 * so each half is integrated exactly once; after taskwait the creating
 * thread computes pi.
 */
int main(int argc, char *argv[]) {
  double x, sum=0.0, pi=0.0;
#if !_DEBUG_
  double start,end;
#endif
  int i;
  const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
  if (argc < 2) {
    fprintf(stderr, Usage);
    exit(1);
  }
  int num_steps = atoi(argv[1]);
  double step = 1.0/(double) num_steps;
#if !_DEBUG_
  start= omp_get_wtime();
#endif
  /* do computation */
#pragma omp parallel
  {
    /* one thread creates the tasks; the rest of the team executes them */
#pragma omp single
    {
#pragma omp task private(i,x) shared(sum)
      for (i=0; i < num_steps/2; i++) {
        x = (i+0.5)*step;
#pragma omp atomic
        sum += 4.0/(1.0+x*x);
#if _DEBUG_
        int id = omp_get_thread_num();
        printf("thread id:%d it:%d\n",id,i);
#endif
      }
#pragma omp task private(i,x) shared(sum)
      for (i=num_steps/2; i < num_steps; i++) {
        x = (i+0.5)*step;
#pragma omp atomic
        sum += 4.0/(1.0+x*x);
#if _DEBUG_
        int id = omp_get_thread_num();
        printf("thread id:%d it:%d\n",id,i);
#endif
      }
      /* wait for both integration tasks before scaling the sum */
#pragma omp taskwait
      pi = step * sum;
    }
  }
#if !_DEBUG_
  end = omp_get_wtime();
  printf("Wall clock execution time = %.9f seconds\n", end-start);
#endif
  /* print results */
  printf("Value of pi = %12.10f\n", pi);
  return EXIT_SUCCESS;
}
|
GB_unaryop__identity_fp64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp64_uint32
// op(A') function: GB_tran__identity_fp64_uint32
// C type: double
// A type: uint32_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = (double) Ax [p] for p = 0..anz-1 in parallel.
// GB_CAST_OP expands to the uint32_t -> double typecast followed by the
// IDENTITY operator (see the macro definitions above).  NOTE: this file is
// auto-generated; only comments were added here.
GrB_Info GB_unop__identity_fp64_uint32
(
double *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
// compiled out when the operator or either type is disabled via GB_control.h
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): the transpose kernel body is supplied by the
// shared template GB_unaryop_transpose.c, specialized through the macros
// defined above (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...).  NOTE: this file is
// auto-generated; only comments were added here.
GrB_Info GB_tran__identity_fp64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
syr2k.c | /**
* syr2k.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.10
/* Problem size */
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define N SIZE
#define M SIZE
/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0)
*/
#define ALPHA 12435
#define BETA 4546
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Fill the operands with deterministic pseudo-data: C is N x N; A and B are
 * stored with row stride N and M columns used (N == M for this benchmark). */
void init_arrays(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) {
  int row, col;
  for (row = 0; row < N; row++) {
    for (col = 0; col < N; col++) {
      C[row * N + col] = ((DATA_TYPE)row * col + 2) / N;
    }
    for (col = 0; col < M; col++) {
      A[row * N + col] = ((DATA_TYPE)row * col) / N;
      B[row * N + col] = ((DATA_TYPE)row * col + 1) / N;
    }
  }
}
/* Reference (sequential) rank-2k update: C = BETA*C + ALPHA*(A*B' + B*A'). */
void syr2k(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) {
  int r, c, t;
  /* scale pass */
  for (r = 0; r < N; r++) {
    for (c = 0; c < N; c++) {
      C[r * N + c] *= BETA;
    }
  }
  /* accumulation pass; keep the two updates separate and in this order so
     the floating-point result matches the original bit-for-bit */
  for (r = 0; r < N; r++) {
    for (c = 0; c < N; c++) {
      for (t = 0; t < M; t++) {
        C[r * N + c] += ALPHA * A[r * M + t] * B[c * M + t];
        C[r * N + c] += ALPHA * B[r * M + t] * A[c * M + t];
      }
    }
  }
}
/* Offloaded rank-2k update: C = BETA*Cinit + ALPHA*(A*B' + B*A').
 *
 * Cinit is scaled by BETA on the host (and is modified in place), the
 * scaled matrix is mapped to the device, and the accumulation runs there,
 * writing the result back into C.
 *
 * Cleanup vs the previous revision: the unused local `sum` and a large
 * block of dead commented-out code were removed; the pragmas and the
 * computation are unchanged. */
void syr2k_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *Cinit) {
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Cinit[i * N + j] *= BETA;
    }
  }
#pragma omp target map(to : A[ : N *M],                                        \
                       B[ : N *M], Cinit[ : N *N])                             \
    map(from : C[ : N *N]) device(DEVICE_ID)
#pragma omp parallel for // collapse(2)
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      C[i * N + j] = Cinit[i * N + j];
      for (int k = 0; k < M; k++) {
        C[i * N + j] += ALPHA * A[i * M + k] * B[j * M + k];
        C[i * N + j] += ALPHA * B[i * M + k] * A[j * M + k];
      }
    }
  }
}
/* Count CPU/GPU entries whose percent difference exceeds the threshold;
 * prints the count and returns it (0 means the outputs match). */
int compareResults(DATA_TYPE *C, DATA_TYPE *C_Gpu) {
  int mismatches = 0;
  int i, j;
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      DATA_TYPE ref = C[i * N + j];
      DATA_TYPE got = C_Gpu[i * N + j];
      if (percentDiff(ref, got) > PERCENT_DIFF_ERROR_THRESHOLD) {
        mismatches++;
      }
    }
  }
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         PERCENT_DIFF_ERROR_THRESHOLD, mismatches);
  return mismatches;
}
/* Benchmark driver: run the offloaded syr2k, optionally verify against the
 * sequential reference (RUN_TEST), and return the mismatch count.
 * Fix vs the previous revision: Cinit was malloc'd but never freed. */
int main() {
  double t_start, t_end;
  int fail = 0;
  DATA_TYPE *A;
  DATA_TYPE *B;
  DATA_TYPE *C;
  DATA_TYPE *Cinit;
  DATA_TYPE *C_Gpu;
  /* all buffers are SIZE x SIZE (N == M), hence N*M elements each */
  A = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
  B = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
  C = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
  Cinit = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
  C_Gpu = (DATA_TYPE *)calloc(N * M, sizeof(DATA_TYPE));
  fprintf(stdout, "<< Symmetric rank-2k operations >>\n");
  init_arrays(A, B, Cinit);
  t_start = rtclock();
  syr2k_OMP(A, B, C_Gpu, Cinit);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
#ifdef RUN_TEST
  /* re-initialize into C (syr2k_OMP scaled Cinit in place) and verify */
  init_arrays(A, B, C);
  t_start = rtclock();
  syr2k(A, B, C);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  fail = compareResults(C, C_Gpu);
#endif
  free(A);
  free(B);
  free(C);
  free(Cinit);   /* was leaked in the previous revision */
  free(C_Gpu);
  return fail;
}
|
test5.c | int g1;
void bar();
void foo() {
0;
g1;
g1 = 20;
#pragma omp barrier
1;
#pragma omp barrier
2;
#pragma omp barrier
g1;
3;
}
void foobar() {
4;
#pragma omp barrier
5;
g1;
g1 = 30;
#pragma omp barrier
6;
#pragma omp barrier
7;
}
int main() {
#pragma omp parallel
{
8;
switch (9) {
case 1:
10;
bar();
11;
break;
case 2:
13;
foo();
14;
break;
default:
15;
g1 = 10;
foobar();
16;
break;
}
17;
}
}
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extract the 'mean' from the image and adjust the image
% to try make set its gamma appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Adjust gamma so the image mean moves toward mid-gray: gamma is chosen as
 * log(mean)/log(0.5) and applied via LevelImage.  With the default channel
 * mask one correction is applied to all channels; otherwise each updatable
 * channel is measured and leveled on its own. */
MagickExport MagickBooleanType AutoGammaImage(Image *image,
ExceptionInfo *exception)
{
double
gamma,
log_mean,
mean,
sans;
MagickStatusType
status;
ssize_t
i;
log_mean=log(0.5);
if (image->channel_mask == DefaultChannels)
{
/*
Apply gamma correction equally across all given channels.
*/
(void) GetImageMean(image,&mean,&sans,exception);
gamma=log(mean*QuantumScale)/log_mean;
return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
}
/*
Auto-gamma each channel separately.
*/
status=MagickTrue;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
ChannelType
channel_mask;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
/* restrict the mask to this one channel, level it, then restore */
channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i));
status=GetImageMean(image,&mean,&sans,exception);
gamma=log(mean*QuantumScale)/log_mean;
status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
(void) SetImageChannelMask(image,channel_mask);
if (status == MagickFalse)
break;
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Stretch each channel to the full quantum range: MinMaxStretchImage with
 * 0% black-point and 0% white-point clipping and gamma 1.0. */
MagickExport MagickBooleanType AutoLevelImage(Image *image,
ExceptionInfo *exception)
{
return(MinMaxStretchImage(image,0.0,0.0,1.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomical function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Convert brightness/contrast percentages (-100..100) into a linear
 * polynomial (slope, intercept) and apply it with FunctionImage.  Contrast
 * maps to the slope through a tangent curve (clamped at 0); brightness
 * shifts the intercept. */
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContastImageTag "BrightnessContast/Image"
double
alpha,
coefficients[2],
intercept,
slope;
MagickBooleanType
status;
/*
Compute slope and intercept.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
alpha=contrast;
/* contrast -100 -> slope 0 (flat), 0 -> slope 1, +100 -> steep */
slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
if (slope < 0.0)
slope=0.0;
intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
coefficients[0]=slope;
coefficients[1]=intercept;
/* apply pixel' = slope*pixel + intercept as a degree-1 polynomial */
status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C L A H E I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLAHEImage() is a variant of adaptive histogram equalization in which the
% contrast amplification is limited, so as to reduce this problem of noise
% amplification.
%
% Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in
% "Graphics Gems IV", Academic Press, 1994.
%
% The format of the CLAHEImage method is:
%
% MagickBooleanType CLAHEImage(Image *image,const size_t width,
% const size_t height,const size_t number_bins,const double clip_limit,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the tile divisions to use in horizontal direction.
%
% o height: the height of the tile divisions to use in vertical direction.
%
% o number_bins: number of bins for histogram ("dynamic range").
%
% o clip_limit: contrast limit for localised changes in contrast. A limit
% less than 1 results in standard non-contrast limited AHE.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Intensity range used by CLAHE, expressed in 16-bit gray levels
  (0..NumberCLAHEGrays-1).
*/
typedef struct _RangeInfo
{
  unsigned short
    min,  /* lowest intensity to map */
    max;  /* highest intensity to map */
} RangeInfo;
/*
  ClipCLAHEHistogram() clips each histogram bin at clip_limit and
  redistributes the clipped (excess) counts across the bins, bounding the
  contrast amplification of the subsequent equalization step.
*/
static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins,
  size_t *histogram)
{
#define NumberCLAHEGrays (65536)

  ssize_t
    i;

  size_t
    cumulative_excess,
    previous_excess,
    step;

  ssize_t
    excess;

  /*
    Compute total number of excess pixels.
  */
  cumulative_excess=0;
  for (i=0; i < (ssize_t) number_bins; i++)
  {
    excess=(ssize_t) histogram[i]-(ssize_t) clip_limit;
    if (excess > 0)
      cumulative_excess+=excess;
  }
  /*
    Clip histogram and redistribute excess pixels across all bins.
  */
  step=cumulative_excess/number_bins;  /* uniform share of the excess per bin */
  excess=(ssize_t) (clip_limit-step);  /* bins above this reach the limit when topped up */
  for (i=0; i < (ssize_t) number_bins; i++)
  {
    if ((double) histogram[i] > clip_limit)
      histogram[i]=(size_t) clip_limit;  /* clip; its excess was counted above */
    else
      if ((ssize_t) histogram[i] > excess)
        {
          /* a full step would overflow the limit: fill to the limit only */
          cumulative_excess-=histogram[i]-excess;
          histogram[i]=(size_t) clip_limit;
        }
      else
        {
          /* room for a full share */
          cumulative_excess-=step;
          histogram[i]+=step;
        }
  }
  /*
    Redistribute remaining excess.
  */
  do
  {
    size_t
      *p;

    size_t
      *q;

    previous_excess=cumulative_excess;
    p=histogram;
    q=histogram+number_bins;
    while ((cumulative_excess != 0) && (p < q))
    {
      /* spread the remainder by visiting every step-th bin */
      step=number_bins/cumulative_excess;
      if (step < 1)
        step=1;
      for (p=histogram; (p < q) && (cumulative_excess != 0); p+=step)
        if ((double) *p < clip_limit)
          {
            (*p)++;
            cumulative_excess--;
          }
      p++;  /* ensure forward progress so the while-loop can terminate */
    }
    /* stop once all excess is spread or an iteration makes no progress */
  } while ((cumulative_excess != 0) && (cumulative_excess < previous_excess));
}
/*
  GenerateCLAHEHistogram(): build the gray-level histogram of one tile.
  Each tile pixel is mapped through the lut and counted into the matching
  bin; rows of the tile are clahe_info->width pixels apart in the buffer.
*/
static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info,
  const RectangleInfo *tile_info,const size_t number_bins,
  const unsigned short *lut,const unsigned short *pixels,size_t *histogram)
{
  const unsigned short
    *row;

  ssize_t
    i,
    x,
    y;

  /*
    Classify the pixels into a gray histogram.
  */
  for (i=0; i < (ssize_t) number_bins; i++)
    histogram[i]=0L;
  row=pixels;
  for (y=0; y < (ssize_t) tile_info->height; y++)
  {
    for (x=0; x < (ssize_t) tile_info->width; x++)
      histogram[lut[row[x]]]++;
    row+=clahe_info->width;  /* advance one full scanline */
  }
}
/*
  InterpolateCLAHE() applies the four surrounding tile mappings (Q12, Q22,
  Q11, Q21) to a sub-tile of pixels, weighting each mapping bilinearly by
  pixel position so that tile boundaries blend without artifacts.
*/
static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12,
  const size_t *Q22,const size_t *Q11,const size_t *Q21,
  const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels)
{
  ssize_t
    y;

  unsigned short
    intensity;

  /*
    Bilinear interpolate four tiles to eliminate boundary artifacts.
  */
  for (y=(ssize_t) tile->height; y > 0; y--)
  {
    ssize_t
      x;

    for (x=(ssize_t) tile->width; x > 0; x--)
    {
      /* x and y count down, so they weight toward Q12/Q11; the complements
         (width-x, height-y) weight toward Q22/Q21 */
      intensity=lut[*pixels];
      *pixels++=(unsigned short) (PerceptibleReciprocal((double) tile->width*
        tile->height)*(y*((double) x*Q12[intensity]+(tile->width-x)*
        Q22[intensity])+(tile->height-y)*((double) x*Q11[intensity]+
        (tile->width-x)*Q21[intensity])));
    }
    /* skip to the start of the sub-tile on the next scanline */
    pixels+=(clahe_info->width-tile->width);
  }
}
/*
  GenerateCLAHELut(): build a lookup table mapping every input intensity in
  [range_info->min, range_info->max] to a histogram bin index in
  [0, number_bins-1].
*/
static void GenerateCLAHELut(const RangeInfo *range_info,
  const size_t number_bins,unsigned short *lut)
{
  ssize_t
    intensity;

  unsigned short
    bin_width;

  /*
    Scale input image [intensity min,max] to [0,number_bins-1].
  */
  bin_width=(unsigned short) ((range_info->max-range_info->min)/number_bins+1);
  for (intensity=(ssize_t) range_info->min;
       intensity <= (ssize_t) range_info->max; intensity++)
    lut[intensity]=(unsigned short) ((intensity-range_info->min)/bin_width);
}
/*
  MapCLAHEHistogram(): replace each histogram bin with the cumulative
  distribution up to that bin, rescaled to the intensity range
  [range_info->min, range_info->max] and clamped at the maximum.
*/
static void MapCLAHEHistogram(const RangeInfo *range_info,
  const size_t number_bins,const size_t number_pixels,size_t *histogram)
{
  double
    cdf,
    scale;

  ssize_t
    bin;

  /*
    Rescale histogram to range [min-intensity .. max-intensity].
  */
  scale=(double) (range_info->max-range_info->min)/number_pixels;
  cdf=0.0;
  for (bin=0; bin < (ssize_t) number_bins; bin++)
  {
    cdf+=histogram[bin];
    histogram[bin]=(size_t) (range_info->min+scale*cdf);
    if (histogram[bin] > range_info->max)
      histogram[bin]=range_info->max;
  }
}
/*
  CLAHE() runs contrast-limited adaptive histogram equalization over a
  16-bit gray buffer of clahe_info->width x clahe_info->height pixels,
  organized as clahe_info->x by clahe_info->y tiles of
  tile_info->width x tile_info->height pixels each.  Returns MagickFalse
  only on memory allocation failure.
*/
static MagickBooleanType CLAHE(const RectangleInfo *clahe_info,
  const RectangleInfo *tile_info,const RangeInfo *range_info,
  const size_t number_bins,const double clip_limit,unsigned short *pixels)
{
  MemoryInfo
    *tile_cache;

  unsigned short
    *p;

  size_t
    limit,
    *tiles;

  ssize_t
    y;

  unsigned short
    *lut;

  /*
    Contrast limited adaptive histogram equalization.
  */
  if (clip_limit == 1.0)
    return(MagickTrue);  /* limit of 1.0 is defined as a no-op */
  /*
    One histogram of number_bins counters for each of the x*y tiles.
  */
  tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*number_bins,
    clahe_info->y*sizeof(*tiles));
  if (tile_cache == (MemoryInfo *) NULL)
    return(MagickFalse);
  lut=(unsigned short *) AcquireQuantumMemory(NumberCLAHEGrays,sizeof(*lut));
  if (lut == (unsigned short *) NULL)
    {
      tile_cache=RelinquishVirtualMemory(tile_cache);
      return(MagickFalse);
    }
  tiles=(size_t *) GetVirtualMemoryBlob(tile_cache);
  /*
    Convert the relative clip limit into an absolute per-bin pixel count.
  */
  limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins);
  if (limit < 1UL)
    limit=1UL;
  /*
    Generate greylevel mappings for each tile.
  */
  GenerateCLAHELut(range_info,number_bins,lut);
  p=pixels;
  for (y=0; y < (ssize_t) clahe_info->y; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) clahe_info->x; x++)
    {
      size_t
        *histogram;

      histogram=tiles+(number_bins*(y*clahe_info->x+x));
      GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram);
      ClipCLAHEHistogram((double) limit,number_bins,histogram);
      MapCLAHEHistogram(range_info,number_bins,tile_info->width*
        tile_info->height,histogram);
      p+=tile_info->width;  /* next tile in this tile-row */
    }
    p+=clahe_info->width*(tile_info->height-1);  /* next tile-row */
  }
  /*
    Interpolate greylevel mappings to get CLAHE image.  The loops run one
    past the tile counts so half-size border regions are handled too.
  */
  p=pixels;
  for (y=0; y <= (ssize_t) clahe_info->y; y++)
  {
    OffsetInfo
      offset;

    RectangleInfo
      tile;

    ssize_t
      x;

    tile.height=tile_info->height;
    tile.y=y-1;
    offset.y=tile.y+1;
    if (y == 0)
      {
        /*
          Top row.
        */
        tile.height=tile_info->height >> 1;
        tile.y=0;
        offset.y=0;
      }
    else
      if (y == (ssize_t) clahe_info->y)
        {
          /*
            Bottom row.
          */
          tile.height=(tile_info->height+1) >> 1;
          tile.y=clahe_info->y-1;
          offset.y=tile.y;
        }
    for (x=0; x <= (ssize_t) clahe_info->x; x++)
    {
      tile.width=tile_info->width;
      tile.x=x-1;
      offset.x=tile.x+1;
      if (x == 0)
        {
          /*
            Left column.
          */
          tile.width=tile_info->width >> 1;
          tile.x=0;
          offset.x=0;
        }
      else
        if (x == (ssize_t) clahe_info->x)
          {
            /*
              Right column.
            */
            tile.width=(tile_info->width+1) >> 1;
            tile.x=clahe_info->x-1;
            offset.x=tile.x;
          }
      /*
        Blend the mappings of the four tiles surrounding this region.
      */
      InterpolateCLAHE(clahe_info,
        tiles+(number_bins*(tile.y*clahe_info->x+tile.x)),  /* Q12 */
        tiles+(number_bins*(tile.y*clahe_info->x+offset.x)),  /* Q22 */
        tiles+(number_bins*(offset.y*clahe_info->x+tile.x)),  /* Q11 */
        tiles+(number_bins*(offset.y*clahe_info->x+offset.x)),  /* Q21 */
        &tile,lut,p);
      p+=tile.width;
    }
    p+=clahe_info->width*(tile.height-1);
  }
  lut=(unsigned short *) RelinquishMagickMemory(lut);
  tile_cache=RelinquishVirtualMemory(tile_cache);
  return(MagickTrue);
}
/*
  CLAHEImage(): pad the image to a whole number of tiles, extract the first
  channel (in Lab space, lightness) as 16-bit grays, run CLAHE() on the
  buffer, and write the result back, restoring the original colorspace.
*/
MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width,
  const size_t height,const size_t number_bins,const double clip_limit,
  ExceptionInfo *exception)
{
#define CLAHEImageTag "CLAHE/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MemoryInfo
    *pixel_cache;

  RangeInfo
    range_info;

  RectangleInfo
    clahe_info,
    tile_info;

  size_t
    n;

  ssize_t
    y;

  unsigned short
    *pixels;

  /*
    Configure CLAHE parameters.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  range_info.min=0;
  range_info.max=NumberCLAHEGrays-1;
  tile_info.width=width;
  if (tile_info.width == 0)
    tile_info.width=image->columns >> 3;  /* default: 8 tiles across */
  tile_info.height=height;
  if (tile_info.height == 0)
    tile_info.height=image->rows >> 3;  /* default: 8 tiles down */
  /*
    tile_info.x/y hold the padding needed so the padded image is an exact
    multiple of the tile size.
  */
  tile_info.x=0;
  if ((image->columns % tile_info.width) != 0)
    tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width);
  tile_info.y=0;
  if ((image->rows % tile_info.height) != 0)
    tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height);
  clahe_info.width=image->columns+tile_info.x;
  clahe_info.height=image->rows+tile_info.y;
  clahe_info.x=(ssize_t) clahe_info.width/tile_info.width;  /* tile count */
  clahe_info.y=(ssize_t) clahe_info.height/tile_info.height;
  pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height*
    sizeof(*pixels));
  if (pixel_cache == (MemoryInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache);
  colorspace=image->colorspace;  /* remembered and restored below */
  if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse)
    {
      pixel_cache=RelinquishVirtualMemory(pixel_cache);
      return(MagickFalse);
    }
  /*
    Initialize CLAHE pixels: copy channel 0 of the padded region, centering
    the image inside the half-tile border via the virtual-pixel offsets.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  progress=0;
  status=MagickTrue;
  n=0;
  for (y=0; y < (ssize_t) clahe_info.height; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y-
      (tile_info.y >> 1),clahe_info.width,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) clahe_info.width; x++)
    {
      pixels[n++]=ScaleQuantumToShort(p[0]);
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        progress++;
        /* NOTE(review): the progress span 2*GetPixelChannels(image) looks
           inconsistent with progress counting scanlines over two passes;
           2*image->rows would match — confirm against SetImageProgress. */
        proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ?
    (size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels);
  if (status == MagickFalse)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  /*
    Push CLAHE pixels to CLAHE image, skipping the half-tile border.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  n=clahe_info.width*(tile_info.y >> 1);  /* skip top border rows */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    n+=tile_info.x >> 1;  /* skip left border pixels */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q[0]=ScaleShortToQuantum(pixels[n++]);
      q+=GetPixelChannels(image);
    }
    n+=(clahe_info.width-image->columns-(tile_info.x >> 1));  /* right border */
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        progress++;
        proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  pixel_cache=RelinquishVirtualMemory(pixel_cache);
  if (TransformImageColorspace(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
%  image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will effect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ClutImage(): sample the CLUT image along its diagonal into a MaxMap+1
  entry table, then replace each channel value of every pixel by looking it
  up in that table.  Only channels whose traits include UpdatePixelTrait
  are replaced.
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *clut_map;

  ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    A color CLUT applied to a gray image needs a color result.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
  if (clut_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image: sample the CLUT diagonal into clut_map.
  */
  status=MagickTrue;
  progress=0;
  /* integer interpolation samples cell centers; others need the -1 so the
     last sample stays inside the image */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  clut_view=AcquireVirtualCacheView(clut_image,exception);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetPixelInfo(clut_image,clut_map+i);
    status=InterpolatePixelInfo(clut_image,clut_view,method,
      (double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
      (clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
    if (status == MagickFalse)
      break;
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelTrait
        traits;

      /*
        Replace each updatable channel with its CLUT lookup.
      */
      GetPixelInfoPixel(image,q,&pixel);
      traits=GetPixelChannelTraits(image,RedPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.red))].red;
      traits=GetPixelChannelTraits(image,GreenPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.green))].green;
      traits=GetPixelChannelTraits(image,BluePixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.blue))].blue;
      traits=GetPixelChannelTraits(image,BlackPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.black))].black;
      traits=GetPixelChannelTraits(image,AlphaPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.alpha))].alpha;
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
  /*
    Enable alpha if the CLUT carries it and the alpha channel is updatable.
  */
  if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
      ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
%  which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
const char *color_correction_collection,ExceptionInfo *exception)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"
typedef struct _Correction
{
double
slope,
offset,
power;
} Correction;
typedef struct _ColorCorrection
{
Correction
red,
green,
blue;
double
saturation;
} ColorCorrection;
CacheView
*image_view;
char
token[MagickPathExtent];
ColorCorrection
color_correction;
const char
*content,
*p;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
*cdl_map;
ssize_t
i;
ssize_t
y;
XMLTreeInfo
*cc,
*ccc,
*sat,
*sop;
/*
Allocate and initialize cdl maps.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (color_correction_collection == (const char *) NULL)
return(MagickFalse);
ccc=NewXMLTree((const char *) color_correction_collection,exception);
if (ccc == (XMLTreeInfo *) NULL)
return(MagickFalse);
cc=GetXMLTreeChild(ccc,"ColorCorrection");
if (cc == (XMLTreeInfo *) NULL)
{
ccc=DestroyXMLTree(ccc);
return(MagickFalse);
}
color_correction.red.slope=1.0;
color_correction.red.offset=0.0;
color_correction.red.power=1.0;
color_correction.green.slope=1.0;
color_correction.green.offset=0.0;
color_correction.green.power=1.0;
color_correction.blue.slope=1.0;
color_correction.blue.offset=0.0;
color_correction.blue.power=1.0;
color_correction.saturation=0.0;
sop=GetXMLTreeChild(cc,"SOPNode");
if (sop != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*offset,
*power,
*slope;
slope=GetXMLTreeChild(sop,"Slope");
if (slope != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(slope);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.slope=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.slope=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.slope=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
offset=GetXMLTreeChild(sop,"Offset");
if (offset != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(offset);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 1:
{
color_correction.green.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.offset=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
power=GetXMLTreeChild(sop,"Power");
if (power != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(power);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.power=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.power=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.power=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
}
sat=GetXMLTreeChild(cc,"SATNode");
if (sat != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*saturation;
saturation=GetXMLTreeChild(sat,"Saturation");
if (saturation != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(saturation);
p=(const char *) content;
(void) GetNextToken(p,&p,MagickPathExtent,token);
color_correction.saturation=StringToDouble(token,(char **) NULL);
}
}
ccc=DestroyXMLTree(ccc);
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" Color Correction Collection:");
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.slope: %g",color_correction.red.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.offset: %g",color_correction.red.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.power: %g",color_correction.red.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.slope: %g",color_correction.green.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.offset: %g",color_correction.green.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.power: %g",color_correction.green.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.slope: %g",color_correction.blue.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.offset: %g",color_correction.blue.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.power: %g",color_correction.blue.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.saturation: %g",color_correction.saturation);
}
cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
if (cdl_map == (PixelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
for (i=0; i <= (ssize_t) MaxMap; i++)
{
cdl_map[i].red=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.red.slope*i/MaxMap+
color_correction.red.offset,color_correction.red.power))));
cdl_map[i].green=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.green.slope*i/MaxMap+
color_correction.green.offset,color_correction.green.power))));
cdl_map[i].blue=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
color_correction.blue.offset,color_correction.blue.power))));
}
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Apply transfer function to colormap.
*/
double
luma;
luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+
0.07217f*image->colormap[i].blue;
image->colormap[i].red=luma+color_correction.saturation*cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma;
image->colormap[i].green=luma+color_correction.saturation*cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma;
image->colormap[i].blue=luma+color_correction.saturation*cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma;
}
/*
Apply transfer function to image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
luma;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
0.07217f*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
%  darker elements of the image.  Set sharpen to MagickTrue to increase the
%  image contrast; otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Contrast(): apply a sigmoidal adjustment to the HSB brightness of one RGB
  color in place.  With sign > 0 dark colors get darker and light colors
  lighter; with sign < 0 the effect is reversed.  Hue and saturation are
  preserved.
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    brightness = 0.0,
    hue = 0.0,
    saturation = 0.0;

  assert(red != (double *) NULL);
  assert(green != (double *) NULL);
  assert(blue != (double *) NULL);
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  /*
    Nudge brightness toward a sine curve centered at mid-gray, then clamp
    to [0.0, 1.0].
  */
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  if (brightness < 0.0)
    brightness=0.0;
  else
    if (brightness > 1.0)
      brightness=1.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
/*
  ContrastImage(): enhance (sharpen != MagickFalse) or reduce the intensity
  differences between lighter and darker pixels by running Contrast() over
  the colormap (PseudoClass) and every pixel.
*/
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag "Contrast/Image"

  CacheView
    *image_view;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* try the OpenCL-accelerated path first; fall through on failure */
  if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          blue,
          green,
          red;

        red=(double) image->colormap[i].red;
        green=(double) image->colormap[i].green;
        blue=(double) image->colormap[i].blue;
        Contrast(sign,&red,&green,&blue);
        image->colormap[i].red=(MagickRealType) red;
        image->colormap[i].green=(MagickRealType) green;
        image->colormap[i].blue=(MagickRealType) blue;
      }
    }
  /*
    Contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      blue,
      green,
      red;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by 'stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% 'enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
%    o black_point, white_point: the points are expressed as a count of
%      pixels in the range 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ContrastStretchImage() linearly rescales each channel so that the
  histogram-derived black point maps to 0 and the white point maps to
  QuantumRange.  black_point/white_point are cumulative pixel counts into the
  channel histogram (not quantum values).  Returns MagickTrue on success;
  errors and warnings are reported via `exception`.
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color)  ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag  "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    *black,        /* per-channel black level (histogram bin index) */
    *histogram,    /* (MaxMap+1) bins per channel, interleaved by channel */
    *stretch_map,  /* per-channel map: bin index -> stretched quantum */
    *white;        /* per-channel white level (histogram bin index) */

  ImageType
    type;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  type=IdentifyImageType(image,exception);
  /* gray/bilevel content is stretched in the GRAY colorspace */
  if ((type == GrayscaleType) || (type == BilevelType))
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black));
  white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /* release whichever buffers were successfully acquired */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        /* with a restricted channel mask, histogram each channel value
           separately; otherwise all channels share the intensity histogram */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    ssize_t
      j;

    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    intensity=0.0;
    /* scan up from bin 0 until the cumulative count exceeds black_point */
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    intensity=0.0;
    /* scan down from the top bin until the cumulative count exceeds
       (total pixel count - white_point) */
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping.
  */
  (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ssize_t
      j;

    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;

      gamma=PerceptibleReciprocal(white[i]-black[i]);
      /* below black -> 0, above white -> QuantumRange, between -> linear */
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          if (black[i] != white[i])
            stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
              (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        j;

      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* degenerate range: leave the channel untouched */
        if (black[j] == white[j])
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  EnhanceImage() returns a noise-reduced copy of the image.  Each output
  pixel is a weighted mean of the 5x5-neighborhood pixels whose color is
  close to the center pixel (distance_squared < 0.069 after the channel
  weighting below), so near-uniform regions are smoothed while edges are
  preserved.  Returns NULL on failure.
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag  "Enhance/Image"
/*
  EnhancePixel(weight): fold the neighborhood pixel at `r` into `aggregate`
  (and `total_weight`) iff its weighted color distance to the center pixel
  `pixel` is below threshold; then advance `r` to the next pixel.  Mutates
  the ambient locals mean/distance/distance_squared/aggregate/total_weight.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /* fetch the row plus a 2-pixel virtual border on every side (5 rows) */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* offset of the center pixel within the 5-row window */
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelInfo
        aggregate;

      const Quantum
        *magick_restrict r;

      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      GetPixelInfoPixel(image,p+center,&pixel);
      /* accumulate the 5x5 kernel, row by row; weights are symmetric */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
        EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      if (total_weight > MagickEpsilon)
        {
          /* +total_weight/2.0 rounds the weighted mean to nearest */
          pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
          pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
          pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
          pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
          pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
        }
      SetPixelViaPixelInfo(enhance_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  EqualizeImage() applies histogram equalization in place: each channel's
  cumulative histogram is rescaled into a lookup table so that pixel
  intensities become approximately uniformly distributed.  Returns
  MagickTrue on success; errors are reported via `exception`.

  Fix: black/white were cleared with memset(...,sizeof(*black)) and
  memset(...,sizeof(*white)), which zeroes only the FIRST element (one
  double) of each array; use sizeof(black)/sizeof(white) to clear the
  whole arrays as intended.
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
  ExceptionInfo *exception)
{
#define EqualizeImageTag  "Equalize/Image"

  CacheView
    *image_view;

  double
    black[CompositePixelChannel+1],  /* per-channel CDF at the bottom bin */
    *equalize_map,                   /* bin index -> equalized quantum */
    *histogram,
    *map,                            /* cumulative histogram (CDF) */
    white[CompositePixelChannel+1];  /* per-channel CDF at the top bin */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateEqualizeImage(image,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*equalize_map));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map));
  if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
      (map == (double *) NULL))
    {
      /* release whichever buffers were successfully acquired */
      if (map != (double *) NULL)
        map=(double *) RelinquishMagickMemory(map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (equalize_map != (double *) NULL)
        equalize_map=(double *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          intensity;

        intensity=(double) p[i];
        /* synced channels share a single intensity histogram */
        if ((image->channel_mask & SyncChannels) != 0)
          intensity=GetPixelIntensity(image,p);
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(intensity))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    ssize_t
      j;

    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      map[GetPixelChannels(image)*j+i]=intensity;
    }
  }
  (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*equalize_map));
  /* zero the whole arrays, not just the first element */
  (void) memset(black,0,sizeof(black));
  (void) memset(white,0,sizeof(white));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ssize_t
      j;

    black[i]=map[i];
    white[i]=map[GetPixelChannels(image)*MaxMap+i];
    /* a flat channel (black == white) keeps a zeroed map and is skipped
       when the image is equalized below */
    if (black[i] != white[i])
      for (j=0; j <= (ssize_t) MaxMap; j++)
        equalize_map[GetPixelChannels(image)*j+i]=(double)
          ScaleMapToQuantum((double) ((MaxMap*(map[
          GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  map=(double *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        j;

      /*
        Equalize colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              RedPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].red=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+
                channel];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              GreenPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].green=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+
                channel];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              BluePixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].blue=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+
                channel];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              AlphaPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].alpha=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+
                channel];
          }
      }
    }
  /*
    Equalize image.
  */
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j]))
          continue;
        q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(double *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const double gamma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o level: the image gamma as a string (e.g. 1.6,1.2,1.0).
%
% o gamma: the image gamma.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
/*
  GammaImage() gamma-corrects the image in place via a precomputed
  (MaxMap+1)-entry lookup table.  gamma == 1.0 is a no-op; gamma == 0.0
  leaves the table zeroed, which drives the affected channels to black.
  The image's stored gamma metadata is scaled by the applied gamma.
*/
MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaImageTag  "Gamma/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;  /* map index (scaled quantum) -> gamma-corrected quantum */

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);  /* identity correction: nothing to do */
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  if (gamma != 0.0)
    /* gamma_map[i] = quantum of (i/MaxMap)^(1/gamma) */
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,PerceptibleReciprocal(gamma))));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          q[j]))];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /* keep the image's gamma metadata consistent with the applied correction */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the image to grayscale.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method ,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GrayscaleImage() converts the image to grayscale in place, computing each
  pixel's intensity with the requested PixelIntensityMethod and then setting
  the image type/colorspace accordingly (LinearGRAY for the luminance
  methods, GRAY otherwise).
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /* expand the colormap so pixels can be rewritten directly */
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace,exception));
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;

      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* HSB brightness: the maximum channel */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* HSL lightness: mean of the min and max channels */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* mean of squared channel values */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /* luma operates on gamma-encoded values */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /* luminance operates on linear values */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:  /* Rec.709 luma is the default method */
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* root mean square of the channel values */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace,exception));
  return(SetImageColorspace(image,GRAYColorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  HaldClutImage() replaces each pixel's color with a value interpolated from
  the Hald CLUT image: the pixel's R/G/B select a position in the 3-D color
  cube that the 2-D hald_image encodes, and the result is blended from four
  CLUT samples (two per cube slice, two slices along blue).
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag  "Clut/Image"

  /* a position inside the CLUT color cube (x=red, y=green, z=blue) */
  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  /* derive the CLUT order from its dimensions: smallest level >= 2 with
     level^3 >= min(columns,rows); level^2 entries per cube axis */
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        area,
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /* map the pixel's color into fractional CLUT coordinates */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      /* linear offset into the CLUT image for the lower green/blue cell */
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      pixel1=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      pixel2=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel3=zero;
      /* blend along green within the lower blue slice */
      area=point.y;
      if (hald_image->interpolate == NearestInterpolatePixel)
        area=(point.y < 0.5) ? 0.0 : 1.0;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        area,&pixel3);
      /* repeat for the upper blue slice */
      offset+=cube_size;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        area,&pixel4);
      pixel=zero;
      /* blend the two slices along blue */
      area=point.z;
      if (hald_image->interpolate == NearestInterpolatePixel)
        area=(point.z < 0.5)? 0.0 : 1.0;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        area,&pixel);
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: the gamma correction to apply to the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  double
    range_scale;

  /*
    Map the [black_point,white_point] interval of a channel value onto the
    full quantum range, applying the reciprocal of gamma as an exponent.
    PerceptibleReciprocal() guards against division by zero when the white
    and black points coincide.  No clamping is performed here; callers clamp.
  */
  range_scale=PerceptibleReciprocal(white_point-black_point);
  return(QuantumRange*gamma_pow(range_scale*((double) pixel-black_point),
    PerceptibleReciprocal(gamma)));
}
MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
  const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag "Level/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;  /* rows completed so far, reported to the progress monitor */
  ssize_t
    i;
  ssize_t
    y;
  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap: each colormap channel is remapped independently,
        gated by that channel's update trait.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image: remap every authentic pixel row-by-row; rows are processed
    in parallel when OpenMP is available.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    /* once any row fails, remaining rows become no-ops (cannot break out of
       an OpenMP loop) */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not selected for update; leave untouched */
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImage() can be invoked by using a +level command line API option,
% or by using a '!' on a -level or LevelImage() geometry string.
%
% It can be used to de-contrast a greyscale image to the exact levels
% specified. Or by using specific levels for each channel of an image you
% can convert a gray-scale image to any linear color gradient, according to
% those levels.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag "Levelize/Image"
/* Reverse of LevelPixel(): apply gamma first, then compress the normalized
   value into the [black_point,white_point] interval. */
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;  /* rows completed so far, reported to the progress monitor */
  ssize_t
    i;
  ssize_t
    y;
  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap: remap each colormap channel gated by its update trait.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image: compress every authentic pixel row-by-row; rows run in
    parallel when OpenMP is available.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    /* a prior row failure turns remaining iterations into no-ops */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not selected for update */
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% bases, as per LevelImage(). The given colors allows you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,
% const PixelInfo *black_color,const PixelInfo *white_color,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_color: The color to map black to/from
%
%    o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
  const PixelInfo *black_color,const PixelInfo *white_color,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;  /* saved mask, restored after each single-channel pass */
  MagickStatusType
    status;  /* AND-accumulates the per-channel Level/Levelize results */
  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A gray image cannot hold per-channel level colors; promote to sRGB when
    either reference color is non-gray.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  status=MagickTrue;
  if (invert == MagickFalse)
    {
      /*
        Level each selected channel independently, restricting the channel
        mask to one channel at a time and restoring it afterwards.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,RedChannel);
          status&=LevelImage(image,black_color->red,white_color->red,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,GreenChannel);
          status&=LevelImage(image,black_color->green,white_color->green,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,BlueChannel);
          status&=LevelImage(image,black_color->blue,white_color->blue,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          channel_mask=SetImageChannelMask(image,BlackChannel);
          status&=LevelImage(image,black_color->black,white_color->black,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        {
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
    }
  else
    {
      /*
        Inverted mapping: apply LevelizeImage() per channel instead, mapping
        black/white to the given colors rather than from them.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,RedChannel);
          status&=LevelizeImage(image,black_color->red,white_color->red,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,GreenChannel);
          status&=LevelizeImage(image,black_color->green,white_color->green,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,BlueChannel);
          status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          channel_mask=SetImageChannelMask(image,BlackChannel);
          status&=LevelizeImage(image,black_color->black,white_color->black,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        {
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag "LinearStretch/Image"
  CacheView
    *image_view;
  double
    *histogram,  /* MaxMap+1 bins of pixel-intensity counts */
    intensity;
  MagickBooleanType
    status;
  ssize_t
    black,  /* histogram bin chosen as the black level */
    white,  /* histogram bin chosen as the white level */
    y;
  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram of pixel intensities (read-only virtual view).
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      intensity=GetPixelIntensity(image,p);
      histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black and white point
    levels: accumulate counts from each end until the requested number of
    pixels (black_point/white_point) has been discarded.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the surviving [black,white] range across the full quantum range.
  */
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
    (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and hue.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    c,
    h,
    l;

  /*
    Scale chroma and luma by their percentages and shift hue by its
    percentage offset (100 leaves each component unchanged), in HCL space.
  */
  ConvertRGBToHCL(*red,*green,*blue,&h,&c,&l);
  h+=fmod(percent_hue-100.0,200.0)/200.0;
  c*=0.01*percent_chroma;
  l*=0.01*percent_luma;
  ConvertHCLToRGB(h,c,l,red,green,blue);
}
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    c,
    h,
    l;

  /*
    Scale chroma and luma by their percentages and shift hue by its
    percentage offset (100 leaves each component unchanged), in HCLp space.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&h,&c,&l);
  h+=fmod(percent_hue-100.0,200.0)/200.0;
  c*=0.01*percent_chroma;
  l*=0.01*percent_luma;
  ConvertHCLpToRGB(h,c,l,red,green,blue);
}
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  double
    b,
    h,
    s;

  /*
    Scale saturation and brightness by their percentages and shift hue by
    its percentage offset (100 leaves each component unchanged), in HSB.
  */
  ConvertRGBToHSB(*red,*green,*blue,&h,&s,&b);
  h+=fmod(percent_hue-100.0,200.0)/200.0;
  s*=0.01*percent_saturation;
  b*=0.01*percent_brightness;
  ConvertHSBToRGB(h,s,b,red,green,blue);
}
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  double
    h,
    intens,
    s;

  /*
    Scale saturation and intensity by their percentages and shift hue by
    its percentage offset (100 leaves each component unchanged), in HSI.
  */
  ConvertRGBToHSI(*red,*green,*blue,&h,&s,&intens);
  h+=fmod(percent_hue-100.0,200.0)/200.0;
  s*=0.01*percent_saturation;
  intens*=0.01*percent_intensity;
  ConvertHSIToRGB(h,s,intens,red,green,blue);
}
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  double
    h,
    l,
    s;

  /*
    Scale saturation and lightness by their percentages and shift hue by
    its percentage offset (100 leaves each component unchanged), in HSL.
  */
  ConvertRGBToHSL(*red,*green,*blue,&h,&s,&l);
  h+=fmod(percent_hue-100.0,200.0)/200.0;
  s*=0.01*percent_saturation;
  l*=0.01*percent_lightness;
  ConvertHSLToRGB(h,s,l,red,green,blue);
}
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  double
    h,
    s,
    v;

  /*
    Scale saturation and value by their percentages and shift hue by its
    percentage offset (100 leaves each component unchanged), in HSV.
  */
  ConvertRGBToHSV(*red,*green,*blue,&h,&s,&v);
  h+=fmod(percent_hue-100.0,200.0)/200.0;
  s*=0.01*percent_saturation;
  v*=0.01*percent_value;
  ConvertHSVToRGB(h,s,v,red,green,blue);
}
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,double *red,
  double *green,double *blue)
{
  double
    bk,
    h,
    w;

  /*
    Scale whiteness and blackness by their percentages and shift hue by
    its percentage offset (100 leaves each component unchanged), in HWB.
  */
  ConvertRGBToHWB(*red,*green,*blue,&h,&w,&bk);
  h+=fmod(percent_hue-100.0,200.0)/200.0;
  w*=0.01*percent_whiteness;
  bk*=0.01*percent_blackness;
  ConvertHWBToRGB(h,w,bk,red,green,blue);
}
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,
  const IlluminantType illuminant,double *red,double *green,double *blue)
{
  double
    c,
    h,
    l;

  /*
    Scale luma and chroma by their percentages and shift hue by its
    percentage offset (100 leaves each component unchanged), in LCHab
    under the given illuminant.
  */
  ConvertRGBToLCHab(*red,*green,*blue,illuminant,&l,&c,&h);
  l*=0.01*percent_luma;
  c*=0.01*percent_chroma;
  h+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertLCHabToRGB(l,c,h,illuminant,red,green,blue);
}
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,
  const IlluminantType illuminant,double *red,double *green,double *blue)
{
  double
    c,
    h,
    l;

  /*
    Scale luma and chroma by their percentages and shift hue by its
    percentage offset (100 leaves each component unchanged), in LCHuv
    under the given illuminant.
  */
  ConvertRGBToLCHuv(*red,*green,*blue,illuminant,&l,&c,&h);
  l*=0.01*percent_luma;
  c*=0.01*percent_chroma;
  h+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertLCHuvToRGB(l,c,h,illuminant,red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
  ExceptionInfo *exception)
{
#define ModulateImageTag "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace = UndefinedColorspace;  /* modulation space; HSL by default */

  const char
    *artifact;

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  GeometryInfo
    geometry_info;

  IlluminantType
    illuminant = D65Illuminant;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;  /* rows completed, for the progress monitor */

  MagickStatusType
    flags;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table: parse "brightness[,saturation[,hue]]" where
    omitted saturation/hue default to 100 (no change).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  flags=ParseGeometry(modulate,&geometry_info);
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    {
      colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
        MagickFalse,artifact);
      /*
        Bug fix: validate the parsed colorspace (the original tested
        illuminant here, so an unrecognized artifact value was never reset).
      */
      if ((ssize_t) colorspace < 0)
        colorspace=UndefinedColorspace;
    }
  artifact=GetImageArtifact(image,"color:illuminant");
  if (artifact != (const char *) NULL)
    {
      illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
        MagickFalse,artifact);
      if ((ssize_t) illuminant < 0)
        illuminant=UndefinedIlluminant;
    }
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        blue,
        green,
        red;

      /*
        Modulate image colormap.
      */
      red=(double) image->colormap[i].red;
      green=(double) image->colormap[i].green;
      blue=(double) image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            illuminant,&red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            illuminant,&red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateModulateImage(image,percent_brightness,percent_hue,
        percent_saturation,colorspace,exception) != MagickFalse)
    return(MagickTrue);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        blue,
        green,
        red;

      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      /*
        This switch mirrors the colormap switch above; the original omitted
        HSIColorspace here (falling through to HSL) and grouped LCH with
        LCHuv instead of LCHab, so PseudoClass and DirectClass images were
        modulated differently.  Both paths now agree.
      */
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            illuminant,&red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            illuminant,&red,&green,&blue);
          break;
        }
      }
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImage method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale,ExceptionInfo *exception)
{
#define NegateImageTag "Negate/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;  /* rows completed, for the progress monitor */

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Negate colormap.  In grayscale mode, skip entries that are not gray
        (unequal R/G/B components).
      */
      if (grayscale != MagickFalse)
        if ((image->colormap[i].red != image->colormap[i].green) ||
            (image->colormap[i].green != image->colormap[i].blue))
          continue;
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange-image->colormap[i].red;
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange-image->colormap[i].green;
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (grayscale != MagickFalse)
    {
      /*
        Grayscale-only path (serial): negate only pixels whose components
        are all equal; other pixels are left untouched.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        Quantum
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            j;

          if (IsPixelGray(image,q) == MagickFalse)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,j);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[j]=QuantumRange-q[j];
          }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            progress++;
            proceed=SetImageProgress(image,NegateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /*
        Bug fix: propagate pixel-access/sync failures; the original returned
        MagickTrue unconditionally from this branch, hiding errors.
      */
      return(status);
    }
  /*
    Negate image (all selected channels of every pixel; rows in parallel
    when OpenMP is available).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=QuantumRange-q[j];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 0.15 percent of all pixels to black and the brightest
% 0.05 percent to white (see the black_point/white_point computation below).
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
  ExceptionInfo *exception)
{
  /*
    Stretch the image contrast by delegating to ContrastStretchImage():
    the darkest 0.15 percent of the pixels are mapped to black and the
    brightest 0.05 percent to white.

      o image: the image.
      o exception: return any errors or warnings in this structure.
  */
  const double
    pixel_count=(double) image->columns*image->rows;

  return(ContrastStretchImage(image,pixel_count*0.0015,pixel_count*0.9995,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const double contrast,
% const double midpoint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
ImageMagick 6 has a version of this function which uses LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
the scaling step, is an exact arithmetic clone of the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
/*
  Right inverse of ScaledSigmoidal (see the comment block above): recovers
  the pre-image of x under the scaled sigmoidal with slope a and inflexion
  point b.  Because b may be 0 or 1 (and HDRI pixel values may be out of
  gamut), the raw sigmoidal value is clamped to the open interval (-1,1)
  for the tanh form, resp. (0,1) for the logistic form, before inversion.

  NOTE(review): the #if below is unusual — each branch supplies the tail of
  the 'clamped' initializer AND the matching return statement, so the
  function body is only well-formed once the preprocessor has run.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  /* Endpoint values used by ScaledSigmoidal's affine normalization. */
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  /* Undo the (s(x)-s(0))/(s(1)-s(0)) scaling: raw sigmoidal value. */
  const double argument=(sig1-sig0)*x+sig0;
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      /* tanh form: clamp into (-1,1) so atanh stays finite. */
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  /* Sigmoidal == tanh((a/2)*(x-b)), so invert with atanh. */
  return(b+(2.0/a)*atanh(clamped));
#else
      /* logistic form: clamp into (0,1) so the log argument is positive. */
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  /* Invert 1/(1+exp(a*(b-x))). */
  return(b-log(1.0/clamped-1.0)/a);
#endif
}
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const double contrast,const double midpoint,
  ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"
/*
  Convenience macros: apply the (inverse) scaled sigmoidal to one quantum,
  clamping the result back into the valid quantum range.  Both capture the
  'contrast' and 'midpoint' parameters of the enclosing function.
*/
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
  ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
  InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Side effect: may clamp values unless contrast<MagickEpsilon, in which
    case nothing is done (the a->0 limit of ScaledSigmoidal divides by zero,
    see the comment above the macro).
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Sigmoidal-contrast enhance colormap (PseudoClass images transform the
    colormap entries directly; the pixel loop below then rewrites pixels).
  */
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      if( sharpen != MagickFalse )
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          /* Only channels flagged for update are transformed. */
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) ScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) ScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) ScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) ScaledSig(
              image->colormap[i].alpha);
        }
      else
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) InverseScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) InverseScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) InverseScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
              image->colormap[i].alpha);
        }
    }
  /*
    Sigmoidal-contrast enhance image, row by row (rows are independent, so
    the loop parallelizes; 'status' is only ever lowered to MagickFalse).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* A failure in another row aborts the remaining work. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      /* Transform every updatable channel of this pixel. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if( sharpen != MagickFalse )
          q[i]=ScaledSig(q[i]);
        else
          q[i]=InverseScaledSig(q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* 'progress' is shared across threads; increment atomically. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e B a l a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteBalanceImage() applies white balancing to an image according to a
% grayworld assumption in the LAB colorspace.
%
% The format of the WhiteBalanceImage method is:
%
% MagickBooleanType WhiteBalanceImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to white-balance.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteBalanceImage(Image *image,
  ExceptionInfo *exception)
{
#define WhiteBalanceImageTag "WhiteBalance/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    a_mean,
    b_mean;

  MagickOffsetType
    progress;

  MagickStatusType
    status;

  ssize_t
    y;

  /*
    White balance image: grayworld assumption in Lab — the mean of the a/b
    chroma channels is treated as the color cast and subtracted out,
    weighted by luminance.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* Work in Lab; converted back to sRGB at the end. */
  status=TransformImageColorspace(image,LabColorspace,exception);
  a_mean=0.0;
  b_mean=0.0;
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    Pass 1 (serial): accumulate the mean chroma offsets.  a/b quanta are
    stored biased by 0.5, hence the subtraction.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      a_mean+=QuantumScale*GetPixela(image,p)-0.5;
      b_mean+=QuantumScale*GetPixelb(image,p)-0.5;
      p+=GetPixelChannels(image);
    }
  }
  a_mean/=((double) image->columns*image->rows);
  b_mean/=((double) image->columns*image->rows);
  progress=0;
  /*
    Pass 2 (parallel over rows): subtract the luminance-weighted cast.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        a,
        b;

      /*
        Scale the chroma distance shifted according to amount of luminance.
        (The 1.1 factor slightly over-corrects; it is part of the tuned
        algorithm, not derived here.)
      */
      a=(double) GetPixela(image,q)-1.1*GetPixelL(image,q)*a_mean;
      b=(double) GetPixelb(image,q)-1.1*GetPixelL(image,q)*b_mean;
      SetPixela(image,ClampToQuantum(a),q);
      SetPixelb(image,ClampToQuantum(b),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* shared counter: increment atomically under OpenMP */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,WhiteBalanceImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Optional vibrance step: the "white-balance:vibrance" artifact levels the
    a & b channels only (channel mask restored afterwards).
  */
  artifact=GetImageArtifact(image,"white-balance:vibrance");
  if (artifact != (const char *) NULL)
    {
      ChannelType
        channel_mask;

      double
        black_point;

      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      /*
        Level the a & b channels.
      */
      flags=ParseGeometry(artifact,&geometry_info);
      black_point=geometry_info.rho;
      if ((flags & PercentValue) != 0)
        black_point*=(double) (QuantumRange/100.0);
      channel_mask=SetImageChannelMask(image,(ChannelType) (aChannel |
        bChannel));
      status&=LevelImage(image,black_point,(double) QuantumRange-black_point,
        1.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  status&=TransformImageColorspace(image,sRGBColorspace,exception);
  return(status != 0 ? MagickTrue : MagickFalse);
}
|
add.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB SP code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
void add()
{
  //---------------------------------------------------------------------
  // Add the update 'rhs' to the solution vector 'u' over the interior
  // grid points (1..nz2 x 1..ny2 x 1..nx2, 5 components per point).
  //---------------------------------------------------------------------
  int i, j, k, m;

  //kai
  // int k16;
  // consistent_data(&k16, "int", 1);
  if (timeron) timer_start(t_add);
  // BUG FIX: k16 (checkpoint progress marker) was written from every
  // thread under default(shared) — a data race with a nondeterministic
  // final value.  lastprivate(k16) gives each thread a private copy and
  // stores the value from the sequentially-last iteration (k == nz2),
  // matching what a serial execution would leave in k16.
  #pragma omp parallel for default(shared) private(i,j,k,m) lastprivate(k16)
  for (k = 1; k <= nz2; k++) {
    for (j = 1; j <= ny2; j++) {
      for (i = 1; i <= nx2; i++) {
        for (m = 0; m < 5; m++) {
          u[k][j][i][m] = u[k][j][i][m] + rhs[k][j][i][m];
        }
      }
    }
    //kai: record checkpoint progress for this (now private) iteration
    k16 = k;
  }
  if (timeron) timer_stop(t_add);
}
|
hipstyle_vs_ompred.c | //
// hipstyle_vs_ompred.c: This test shows how a hipstyle reduction in an openmp
// target region compares to the simple-to-code omp reduction.
//
#include <omp.h>
#include <stdio.h>
#define N 5000001
// These macros allows compilation with -DNUM_TEAMS=<testval> and
// -DNUM_THREADS=<testval> Default NUM_TEAMS set for vega number of CUs
#ifndef NUM_TEAMS
#define NUM_TEAMS 60
#endif
#ifndef NUM_THREADS
#define NUM_THREADS 1024
#endif
void __kmpc_impl_syncthreads();
int main() {
  int main_rc = 0;
  // Sum of 0..N-1, computed exactly in double for comparison.
  double expect = (double)(((double)N - 1) * (double)N) / 2.0;

  // dry runs to initialize hsa_queue's
  for (int i = 0; i < 4; i++) {
#pragma omp target
    {}
  }

  // Initialize GPUs with a simple kernel
#pragma omp target
  printf("GPUs initialized NUM_TEAMS:%d NUM_THREADS:%d\n",
         NUM_TEAMS, NUM_THREADS);

  // --------- Calculate sum using manual reduction technique -------
  double hipstyle_sum = 0.0;
  double t0 = omp_get_wtime();
#pragma omp target teams distribute parallel for num_teams(NUM_TEAMS) \
    thread_limit(NUM_THREADS) map(tofrom: hipstyle_sum)
  for (int kk = 0; kk < NUM_TEAMS * NUM_THREADS; kk++) {
    // A HIP or CUDA kernel will use builtin values with names like these
    // We get these values from the OpenMP target API;
    unsigned int BlockIdx_x = omp_get_team_num();
    unsigned int ThreadIdx_x = omp_get_thread_num();
    unsigned int GridDim_x = NUM_TEAMS;    // could be omp_get_num_teams()
    unsigned int BlockDim_x = NUM_THREADS; // could be omp_get_num_threads()
    // tb_sum is an LDS array that is shared only within a team.
    // The openmp pteam allocator for shared arrays does not work yet.
    // But this attribute makes the array LDS.
    static __attribute__((address_space(3))) double tb_sum[NUM_THREADS];
    int i = BlockDim_x * BlockIdx_x + ThreadIdx_x;
    tb_sum[ThreadIdx_x] = 0.0;
    // Grid-style strided accumulation into this thread's LDS slot.
    for (; i < N; i += BlockDim_x * GridDim_x)
      tb_sum[ThreadIdx_x] += (double)i;
    // clang does not permit #pragma omp barrier here
    // But we need one, so use the internal libomptarget barrier
#if defined(__AMDGCN__) || defined(__NVPTX__)
    __kmpc_impl_syncthreads();
#endif
    // Reduce each team into tb_sum[0].
    // BUG FIX: a barrier is required after EVERY halving step, not just
    // before the loop — otherwise a thread may read tb_sum[tid+offset]
    // before its owner finished writing it in the previous step.
    for (int offset = BlockDim_x / 2; offset > 0; offset /= 2) {
      if (ThreadIdx_x < offset)
        tb_sum[ThreadIdx_x] += tb_sum[ThreadIdx_x + offset];
#if defined(__AMDGCN__) || defined(__NVPTX__)
      __kmpc_impl_syncthreads();
#endif
    }
    // Atomically reduce each teams sum to a single value.
    // This is concurrent access by NUM_TEAMS workgroups to a single global val
    // For machines with hardware fp atomic use the hint here.
    if (ThreadIdx_x == 0) {
#pragma omp atomic
      hipstyle_sum += tb_sum[0];
    }
    // FYI. In a real code, if reduced value (hipstyle_sum) were needed on GPU
    // after this point you would need some sort of cross-team barrier.
  } // END TARGET REGION
  double t1 = omp_get_wtime() - t0;
  if (hipstyle_sum == expect) {
    printf("Success HIP-style sum of %d integers is: %14.0f in %f secs\n",
           N - 1, hipstyle_sum, t1);
  } else {
    printf("FAIL HIPSTYLE SUM N:%d result: %f != expect: %f \n", N - 1,
           hipstyle_sum, expect);
    main_rc = 1;
  }

  // --------- Calculate sum using OpenMP reduction -------
  double ompred_sum = 0.0;
  double t2 = omp_get_wtime();
  // BUG FIX: the map clause previously named hipstyle_sum (copy-paste from
  // the kernel above); the variable reduced and copied back here is
  // ompred_sum.
#pragma omp target teams distribute parallel for num_teams(NUM_TEAMS) \
    thread_limit(NUM_THREADS) map(tofrom: ompred_sum) reduction(+:ompred_sum)
  for (int ii = 0; ii < N; ++ii)
    ompred_sum += (double)ii;
  double t3 = omp_get_wtime() - t2;
  if (ompred_sum == expect) {
    printf("Success OMP reduction sum of %d integers is: %14.0f in %f secs\n",
           N - 1, ompred_sum, t3);
  } else {
    printf("FAIL REDUCTION SUM N:%d result: %f != expect: %f \n", N - 1,
           ompred_sum, expect);
    main_rc = 1;
  }
  return main_rc;
}
|
convolution_7x7.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv7x7s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*49 + q*49;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* r6 = img0 + w*6;
const float* k0 = kernel0;
const float* k1 = kernel0 + 7;
const float* k2 = kernel0 + 14;
const float* k3 = kernel0 + 21;
const float* k4 = kernel0 + 28;
const float* k5 = kernel0 + 35;
const float* k6 = kernel0 + 42;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k4567 = vld1q_f32(k0 + 4);
float32x4_t _k78910 = vld1q_f32(k1);
float32x4_t _k11121314 = vld1q_f32(k1 + 4);
float32x4_t _k14151617 = vld1q_f32(k2);
float32x4_t _k18192021 = vld1q_f32(k2 + 4);
float32x4_t _k21222324 = vld1q_f32(k3);
float32x4_t _k25262728 = vld1q_f32(k3 + 4);
float32x4_t _k28293031 = vld1q_f32(k4);
float32x4_t _k32333435 = vld1q_f32(k4 + 4);
float32x4_t _k35363738 = vld1q_f32(k5);
float32x4_t _k39404142 = vld1q_f32(k5 + 4);
float32x4_t _k42434445 = vld1q_f32(k6);
float32x4_t _k46474849 = vld1q_f32(k6 + 4);
#ifdef __clang__ // __ARM_NEON && __aarch64__ && __clang__
if (nn > 0)
{
asm volatile(
// v0: input / final output
// v1 v2 v3: = ri0 ri4 ri0n , i <- 1-7
// v4 = ri1 / ri3 / ri6
// v5 = ri2 / ri5
// v9 = intermediate sum register
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
//i = 1
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%2] \n"
"add %2, %2, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n"
"fmul v9.4s, v1.4s, %18.s[0] \n"
"ext v5.16b, v1.16b, v2.16b, #8 \n"
"fmla v0.4s, v4.4s, %18.s[1] \n"
"ext v4.16b, v1.16b, v2.16b, #12 \n"
"fmla v9.4s, v5.4s, %18.s[2] \n"
"ext v5.16b, v2.16b, v3.16b, #4 \n"
"fmla v0.4s, v4.4s, %18.s[3] \n"
"ext v4.16b, v2.16b, v3.16b, #8 \n"
"fmla v9.4s, v2.4s, %19.s[0] \n"
"fmla v0.4s, v5.4s, %19.s[1] \n"
"fmla v9.4s, v4.4s, %19.s[2] \n"
//i = 2
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%3] \n" // v1 v2 v3: = r20 r24 r20n
"add %3, %3, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n" // v4 = r21
"fmla v9.4s, v1.4s, %20.s[0] \n" // *+ r10
"ext v5.16b, v1.16b, v2.16b, #8 \n" // v5 = r22
"fmla v0.4s, v4.4s, %20.s[1] \n" // *+ r11
"ext v4.16b, v1.16b, v2.16b, #12 \n" // v4 = r23
"fmla v9.4s, v5.4s, %20.s[2] \n" // *+ r1
"ext v5.16b, v2.16b, v3.16b, #4 \n" // v5 = r25
"fmla v0.4s, v4.4s, %20.s[3] \n" // *+ r13
"ext v4.16b, v2.16b, v3.16b, #8 \n" // v4 = r26
"fmla v9.4s, v2.4s, %21.s[0] \n" // *+ r14
"fmla v0.4s, v5.4s, %21.s[1] \n" // *+ r15
"fmla v9.4s, v4.4s, %21.s[2] \n" // *+ r16
//i = 3
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%4] \n"
"add %4, %4, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n"
"fmla v9.4s, v1.4s, %22.s[0] \n"
"ext v5.16b, v1.16b, v2.16b, #8 \n"
"fmla v0.4s, v4.4s, %22.s[1] \n"
"ext v4.16b, v1.16b, v2.16b, #12 \n"
"fmla v9.4s, v5.4s, %22.s[2] \n"
"ext v5.16b, v2.16b, v3.16b, #4 \n"
"fmla v0.4s, v4.4s, %22.s[3] \n"
"ext v4.16b, v2.16b, v3.16b, #8 \n"
"fmla v9.4s, v2.4s, %23.s[0] \n"
"fmla v0.4s, v5.4s, %23.s[1] \n"
"fmla v9.4s, v4.4s, %23.s[2] \n"
//i = 4
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%5] \n"
"add %5, %5, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n"
"fmla v9.4s, v1.4s, %24.s[0] \n"
"ext v5.16b, v1.16b, v2.16b, #8 \n"
"fmla v0.4s, v4.4s, %24.s[1] \n"
"ext v4.16b, v1.16b, v2.16b, #12 \n"
"fmla v9.4s, v5.4s, %24.s[2] \n"
"ext v5.16b, v2.16b, v3.16b, #4 \n"
"fmla v0.4s, v4.4s, %24.s[3] \n"
"ext v4.16b, v2.16b, v3.16b, #8 \n"
"fmla v9.4s, v2.4s, %25.s[0] \n"
"fmla v0.4s, v5.4s, %25.s[1] \n"
"fmla v9.4s, v4.4s, %25.s[2] \n"
//i = 5
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%6] \n"
"add %6, %6, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n"
"fmla v9.4s, v1.4s, %26.s[0] \n"
"ext v5.16b, v1.16b, v2.16b, #8 \n"
"fmla v0.4s, v4.4s, %26.s[1] \n"
"ext v4.16b, v1.16b, v2.16b, #12 \n"
"fmla v9.4s, v5.4s, %26.s[2] \n"
"ext v5.16b, v2.16b, v3.16b, #4 \n"
"fmla v0.4s, v4.4s, %26.s[3] \n"
"ext v4.16b, v2.16b, v3.16b, #8 \n"
"fmla v9.4s, v2.4s, %27.s[0] \n"
"fmla v0.4s, v5.4s, %27.s[1] \n"
"fmla v9.4s, v4.4s, %27.s[2] \n"
//i = 6
"prfm pldl1keep, [%7, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%7] \n"
"add %7, %7, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n"
"fmla v9.4s, v1.4s, %28.s[0] \n"
"ext v5.16b, v1.16b, v2.16b, #8 \n"
"fmla v0.4s, v4.4s, %28.s[1] \n"
"ext v4.16b, v1.16b, v2.16b, #12 \n"
"fmla v9.4s, v5.4s, %28.s[2] \n"
"ext v5.16b, v2.16b, v3.16b, #4 \n"
"fmla v0.4s, v4.4s, %28.s[3] \n"
"ext v4.16b, v2.16b, v3.16b, #8 \n"
"fmla v9.4s, v2.4s, %29.s[0] \n"
"fmla v0.4s, v5.4s, %29.s[1] \n"
"fmla v9.4s, v4.4s, %29.s[2] \n"
//i = 7
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%8] \n"
"add %8, %8, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n"
"fmla v9.4s, v1.4s, %30.s[0] \n"
"ext v5.16b, v1.16b, v2.16b, #8 \n"
"fmla v0.4s, v4.4s, %30.s[1] \n"
"ext v4.16b, v1.16b, v2.16b, #12 \n"
"fmla v9.4s, v5.4s, %30.s[2] \n"
"ext v5.16b, v2.16b, v3.16b, #4 \n"
"fmla v0.4s, v4.4s, %30.s[3] \n"
"ext v4.16b, v2.16b, v3.16b, #8 \n"
"fmla v9.4s, v2.4s, %31.s[0] \n"
"fmla v0.4s, v5.4s, %31.s[1] \n"
"fmla v9.4s, v4.4s, %31.s[2] \n"
"fadd v0.4s, v0.4s, v9.4s \n"
"st1 {v0.4s}, [%1], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6) // %8
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k78910), // %20
"w"(_k11121314), // %21
"w"(_k14151617), // %22
"w"(_k18192021), // %23
"w"(_k21222324), // %24
"w"(_k25262728), // %25
"w"(_k28293031), // %26
"w"(_k32333435), // %27
"w"(_k35363738), // %28
"w"(_k39404142), // %29
"w"(_k42434445), // %30
"w"(_k46474849) // %31
: "cc", "memory","v0", "v1", "v2", "v3", "v4", "v5", "v9"
);
}
#else // __ARM_NEON && __aarch64__ defined, but __clang__ not defined
// When compiled with gcc, gcc does not accept over 30 operands
for (; nn>0; nn--)
{
float32x4_t _sum = vld1q_f32(outptr);
float32x4_t _r00 = vld1q_f32(r0);// 0 1 2 3
float32x4_t _r04 = vld1q_f32(r0 + 4);// 4 5 6 7
float32x4_t _r00n = vld1q_f32(r0 + 8);// 8 9 10 11
float32x4_t _r01 = vextq_f32(_r00, _r04, 1);// 1 2 3 4
float32x4_t _r02 = vextq_f32(_r00, _r04, 2);// 2 3 4 5
float32x4_t _r03 = vextq_f32(_r00, _r04, 3);// 3 4 5 6
float32x4_t _r05 = vextq_f32(_r04, _r00n, 1);// 5 6 7 8
float32x4_t _r06 = vextq_f32(_r04, _r00n, 2);// 6 7 8 9
_sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
_sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
_sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
_sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
_sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
_sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
_sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r14 = vld1q_f32(r1 + 4);
float32x4_t _r10n = vld1q_f32(r1 + 8);
float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
float32x4_t _r13 = vextq_f32(_r10, _r14, 3);
float32x4_t _r15 = vextq_f32(_r14, _r10n, 1);
float32x4_t _r16 = vextq_f32(_r14, _r10n, 2);
_sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
_sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
_sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
_sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
_sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
_sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
_sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r24 = vld1q_f32(r2 + 4);
float32x4_t _r20n = vld1q_f32(r2 + 8);
float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
float32x4_t _r23 = vextq_f32(_r20, _r24, 3);
float32x4_t _r25 = vextq_f32(_r24, _r20n, 1);
float32x4_t _r26 = vextq_f32(_r24, _r20n, 2);
_sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
_sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
_sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
_sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
_sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
_sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
_sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r34 = vld1q_f32(r3 + 4);
float32x4_t _r30n = vld1q_f32(r3 + 8);
float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
float32x4_t _r33 = vextq_f32(_r30, _r34, 3);
float32x4_t _r35 = vextq_f32(_r34, _r30n, 1);
float32x4_t _r36 = vextq_f32(_r34, _r30n, 2);
_sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
_sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
_sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
_sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
_sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
_sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
_sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r44 = vld1q_f32(r4 + 4);
float32x4_t _r40n = vld1q_f32(r4 + 8);
float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
float32x4_t _r43 = vextq_f32(_r40, _r44, 3);
float32x4_t _r45 = vextq_f32(_r44, _r40n, 1);
float32x4_t _r46 = vextq_f32(_r44, _r40n, 2);
_sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
_sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
_sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
_sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
_sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
_sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
_sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r54 = vld1q_f32(r5 + 4);
float32x4_t _r50n = vld1q_f32(r5 + 8);
float32x4_t _r51 = vextq_f32(_r50, _r54, 1);
float32x4_t _r52 = vextq_f32(_r50, _r54, 2);
float32x4_t _r53 = vextq_f32(_r50, _r54, 3);
float32x4_t _r55 = vextq_f32(_r54, _r50n, 1);
float32x4_t _r56 = vextq_f32(_r54, _r50n, 2);
_sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
_sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
_sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
_sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
_sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
_sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
_sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);
float32x4_t _r60 = vld1q_f32(r6);
float32x4_t _r64 = vld1q_f32(r6 + 4);
float32x4_t _r60n = vld1q_f32(r6 + 8);
float32x4_t _r61 = vextq_f32(_r60, _r64, 1);
float32x4_t _r62 = vextq_f32(_r60, _r64, 2);
float32x4_t _r63 = vextq_f32(_r60, _r64, 3);
float32x4_t _r65 = vextq_f32(_r64, _r60n, 1);
float32x4_t _r66 = vextq_f32(_r64, _r60n, 2);
_sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
_sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
_sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
_sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
_sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
_sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
_sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);
vst1q_f32(outptr, _sum);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
r6 += 4;
outptr += 4;
}
#endif // __clang__
#else //__aarch32__
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d25}, [%1] \n"// _sum
// "veor q13, q13 \n"// _sum2 = 0;
// "veor q14, q14 \n"// _sum3 = 0;
// "veor q15, q15 \n"// _sum4 = 0;
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
"add %9, #28 \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2]! \n"// q0 = 0 1 2 3
"vmla.f32 q12, q0, d8[0] \n"
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2] \n"// q2 = 4 5 6 7 q3 = 8 9 10 11
"vmul.f32 q13, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"// q1 = 1 2 3 4
"vext.32 q10, q2, q3, #1 \n"// q10= 5 6 7 8
"vmul.f32 q14, q1, d8[1] \n"
"vmul.f32 q15, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"// q8 = 2 3 4 5
"vext.32 q11, q2, q3, #2 \n"// q11= 6 7 8 9
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"// q9 = 3 4 5 6
"vmla.f32 q14, q9, d9[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
"add %9, #28 \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"pld [%3, #256] \n"
"vld1.f32 {d4-d7}, [%3] \n"
"vmla.f32 q12, q2, d14[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q13, q1, d12[1] \n"
"vmla.f32 q14, q10, d14[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q11, d15[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q13, q9, d13[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
"add %9, #28 \n"
"pld [%4, #128] \n"
"vld1.f32 {d0-d1}, [%4]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"pld [%4, #256] \n"
"vld1.f32 {d4-d7}, [%4] \n"
"vmla.f32 q15, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q12, q1, d8[1] \n"
"vmla.f32 q13, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q12, q9, d9[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
"add %9, #28 \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5]! \n"
"vmla.f32 q13, q0, d12[0] \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5] \n"
"vmla.f32 q14, q2, d14[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q15, q1, d12[1] \n"
"vmla.f32 q12, q10, d14[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q11, d15[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
"add %9, #28 \n"
"pld [%6, #128] \n"
"vld1.f32 {d0-d1}, [%6]! \n"
"vmla.f32 q12, q0, d8[0] \n"
"pld [%6, #256] \n"
"vld1.f32 {d4-d7}, [%6] \n"
"vmla.f32 q13, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q14, q1, d8[1] \n"
"vmla.f32 q15, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q14, q9, d9[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
"add %9, #28 \n"
"pld [%7, #128] \n"
"vld1.f32 {d0-d1}, [%7]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7] \n"
"vmla.f32 q12, q2, d14[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q13, q1, d12[1] \n"
"vmla.f32 q14, q10, d14[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q11, d15[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q13, q9, d13[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
"sub %9, #168 \n"// restore k0
"pld [%8, #128] \n"
"vld1.f32 {d0-d1}, [%8]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"pld [%8, #256] \n"
"vld1.f32 {d4-d7}, [%8] \n"
"vmla.f32 q15, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q12, q1, d8[1] \n"
"vmla.f32 q13, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vadd.f32 q13, q13, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"vst1.f32 {d24-d25}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6), // %8
"=r"(k0) // %9
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"9"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r0[5] * k0[5];
sum += r0[6] * k0[6];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r1[5] * k1[5];
sum += r1[6] * k1[6];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r2[5] * k2[5];
sum += r2[6] * k2[6];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r3[5] * k3[5];
sum += r3[6] * k3[6];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum += r4[5] * k4[5];
sum += r4[6] * k4[6];
sum += r5[0] * k5[0];
sum += r5[1] * k5[1];
sum += r5[2] * k5[2];
sum += r5[3] * k5[3];
sum += r5[4] * k5[4];
sum += r5[5] * k5[5];
sum += r5[6] * k5[6];
sum += r6[0] * k6[0];
sum += r6[1] * k6[1];
sum += r6[2] * k6[2];
sum += r6[3] * k6[3];
sum += r6[4] * k6[4];
sum += r6[5] * k6[5];
sum += r6[6] * k6[6];
*outptr += sum;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
outptr++;
}
r0 += 6;
r1 += 6;
r2 += 6;
r3 += 6;
r4 += 6;
r5 += 6;
r6 += 6;
}
}
}
}
// conv7x7s2_neon: single-precision 7x7 convolution with stride 2.
// For each output channel p: fill the channel with its bias, then for each
// input channel q accumulate the 7x7 correlation of that channel into the
// output (the "+=" accumulation across q is why outptr is re-loaded below).
// NOTE(review): there is no bounds handling here — presumably the caller has
// already padded/cropped bottom_blob so every 7x7 window is in-bounds;
// confirm against the calling convolution layer.
static void conv7x7s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// After producing one output row the input row pointers have advanced
// 2*outw floats, but stride 2 consumes two input rows per output row:
// skip the rest of the current row (w - 2*outw) plus one whole row.
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
// Output channels are independent — one OpenMP thread per channel.
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
// 49 weights per (output, input) channel pair, row-major 7x7.
const float* kernel0 = kernel + p*inch*49 + q*49;
// r0..r6: the seven consecutive input rows under the filter window.
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* r6 = img0 + w*6;
// k0..k6: the seven 7-tap kernel rows.
const float* k0 = kernel0;
const float* k1 = kernel0 + 7;
const float* k2 = kernel0 + 14;
const float* k3 = kernel0 + 21;
const float* k4 = kernel0 + 28;
const float* k5 = kernel0 + 35;
const float* k6 = kernel0 + 42;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
// Vector path computes 4 outputs per iteration; 'remain' is the tail.
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// Preload all 49 weights into q-registers, two loads per kernel row.
// Each second vector (_k4567, _k11121314, ...) is loaded from row+4, so
// its last lane is the first tap of the NEXT row; only lanes 0..2 of
// those vectors are ever used below.
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k4567 = vld1q_f32(k0 + 4);
float32x4_t _k78910 = vld1q_f32(k1);
float32x4_t _k11121314 = vld1q_f32(k1 + 4);
float32x4_t _k14151617 = vld1q_f32(k2);
float32x4_t _k18192021 = vld1q_f32(k2 + 4);
float32x4_t _k21222324 = vld1q_f32(k3);
float32x4_t _k25262728 = vld1q_f32(k3 + 4);
float32x4_t _k28293031 = vld1q_f32(k4);
float32x4_t _k32333435 = vld1q_f32(k4 + 4);
float32x4_t _k35363738 = vld1q_f32(k5);
float32x4_t _k39404142 = vld1q_f32(k5 + 4);
float32x4_t _k42434445 = vld1q_f32(k6);
float32x4_t _k46474849 = vld1q_f32(k6 + 4);
#ifdef __clang__ // __ARM_NEON && __aarch64__ && __clang__
if (nn > 0)
{
// Two accumulators (v0 = value loaded from outptr, v9 = fmul-seeded
// partial sum) are interleaved to hide FMA latency and summed with
// fadd just before the store.
asm volatile(
// v0: input / final output
// v1 v2: = _ri0/_ri1 first
// v3 v4: = then _r0_8101214/_r0_9111315
// v5 = ri2 / ri4 / ri6
// v6 = ri3 / ri5
// v9 = intermediate sum register
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
//i = 1
"prfm pldl1keep, [%2, #512] \n"
"ld2 {v1.4s, v2.4s}, [%2] \n" // v1 v2 = _r00 _r01
"add %2, %2, #32 \n"
"ld2 {v3.4s, v4.4s}, [%2] \n" // v3 v4 = _r0_8101214 / _r0_9111315
"fmul v9.4s, v1.4s, %18.s[0] \n" // *+ _r00
"ext v5.16b, v1.16b, v3.16b, #4 \n" // v5 = _r02
"fmla v0.4s, v2.4s, %18.s[1] \n" // *+ _r01
"ext v6.16b, v2.16b, v4.16b, #4 \n" // v6 = _r03
"fmla v9.4s, v5.4s, %18.s[2] \n" // *+ _r02
"ext v5.16b, v1.16b, v3.16b, #8 \n" // v5 = _r04
"fmla v0.4s, v6.4s, %18.s[3] \n" // *+ _r03
"ext v6.16b, v2.16b, v4.16b, #8 \n" // v6 = _r05
"fmla v9.4s, v5.4s, %19.s[0] \n" // *+ _r04
"ext v5.16b, v1.16b, v3.16b, #12 \n" // v5 = _r06
"fmla v0.4s, v6.4s, %19.s[1] \n" // *+ _r05
"fmla v9.4s, v5.4s, %19.s[2] \n" // *+ _r06
//i = 2
"prfm pldl1keep, [%3, #512] \n"
"ld2 {v1.4s, v2.4s}, [%3] \n"
"add %3, %3, #32 \n"
"ld2 {v3.4s, v4.4s}, [%3] \n"
"fmla v9.4s, v1.4s, %20.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %20.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %20.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %20.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %21.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %21.s[1] \n"
"fmla v9.4s, v5.4s, %21.s[2] \n"
//i = 3
"prfm pldl1keep, [%4, #512] \n"
"ld2 {v1.4s, v2.4s}, [%4] \n"
"add %4, %4, #32 \n"
"ld2 {v3.4s, v4.4s}, [%4] \n"
"fmla v9.4s, v1.4s, %22.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %22.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %22.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %22.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %23.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %23.s[1] \n"
"fmla v9.4s, v5.4s, %23.s[2] \n"
//i = 4
"prfm pldl1keep, [%5, #512] \n"
"ld2 {v1.4s, v2.4s}, [%5] \n"
"add %5, %5, #32 \n"
"ld2 {v3.4s, v4.4s}, [%5] \n"
"fmla v9.4s, v1.4s, %24.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %24.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %24.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %24.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %25.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %25.s[1] \n"
"fmla v9.4s, v5.4s, %25.s[2] \n"
//i = 5
"prfm pldl1keep, [%6, #512] \n"
"ld2 {v1.4s, v2.4s}, [%6] \n"
"add %6, %6, #32 \n"
"ld2 {v3.4s, v4.4s}, [%6] \n"
"fmla v9.4s, v1.4s, %26.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %26.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %26.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %26.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %27.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %27.s[1] \n"
"fmla v9.4s, v5.4s, %27.s[2] \n"
//i = 6
"prfm pldl1keep, [%7, #512] \n"
"ld2 {v1.4s, v2.4s}, [%7] \n"
"add %7, %7, #32 \n"
"ld2 {v3.4s, v4.4s}, [%7] \n"
"fmla v9.4s, v1.4s, %28.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %28.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %28.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %28.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %29.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %29.s[1] \n"
"fmla v9.4s, v5.4s, %29.s[2] \n"
//i = 7
"prfm pldl1keep, [%8, #512] \n"
"ld2 {v1.4s, v2.4s}, [%8] \n"
"add %8, %8, #32 \n"
"ld2 {v3.4s, v4.4s}, [%8] \n"
"fmla v9.4s, v1.4s, %30.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %30.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %30.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %30.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %31.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %31.s[1] \n"
"fmla v9.4s, v5.4s, %31.s[2] \n"
"fadd v0.4s, v0.4s, v9.4s \n"
"st1 {v0.4s}, [%1], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6) // %8
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k78910), // %20
"w"(_k11121314), // %21
"w"(_k14151617), // %22
"w"(_k18192021), // %23
"w"(_k21222324), // %24
"w"(_k25262728), // %25
"w"(_k28293031), // %26
"w"(_k32333435), // %27
"w"(_k35363738), // %28
"w"(_k39404142), // %29
"w"(_k42434445), // %30
"w"(_k46474849) // %31
: "cc", "memory","v0", "v1", "v2", "v3", "v4", "v5", "v6", "v9"
);
}
#else // __ARM_NEON && __aarch64__ defined, but __clang__ not defined
// When compiled with gcc, gcc does not accept over 30 operands
// Intrinsics equivalent of the asm above: vld2q deinterleaves each row
// into even-index (val[0]) and odd-index (val[1]) columns, and vextq
// builds the shifted vectors needed for filter taps 2..6.
for (; nn>0; nn--)
{
float32x4_t _sum = vld1q_f32(outptr);
float32x4x2_t _r00_02461357 = vld2q_f32(r0);
float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8);
float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14
float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15
float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6
float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7
float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8
float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9
float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10
float32x4_t _r05 = vextq_f32(_r01, _r0_9111315, 2);// 5 7 9 11
float32x4_t _r06 = vextq_f32(_r00, _r0_8101214, 3);// 6 8 10 12
_sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
_sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
_sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
_sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
_sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
_sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
_sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);
float32x4x2_t _r10_02461357 = vld2q_f32(r1);
float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8);
float32x4_t _r1_8101214 = _r10nx2.val[0];
float32x4_t _r1_9111315 = _r10nx2.val[1];
float32x4_t _r10 = _r10_02461357.val[0];
float32x4_t _r11 = _r10_02461357.val[1];
float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1);
float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1);
float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2);
float32x4_t _r15 = vextq_f32(_r11, _r1_9111315, 2);
float32x4_t _r16 = vextq_f32(_r10, _r1_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
_sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
_sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
_sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
_sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
_sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
_sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);
float32x4x2_t _r20_02461357 = vld2q_f32(r2);
float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8);
float32x4_t _r2_8101214 = _r20nx2.val[0];
float32x4_t _r2_9111315 = _r20nx2.val[1];
float32x4_t _r20 = _r20_02461357.val[0];
float32x4_t _r21 = _r20_02461357.val[1];
float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1);
float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1);
float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2);
float32x4_t _r25 = vextq_f32(_r21, _r2_9111315, 2);
float32x4_t _r26 = vextq_f32(_r20, _r2_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
_sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
_sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
_sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
_sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
_sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
_sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);
float32x4x2_t _r30_02461357 = vld2q_f32(r3);
float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8);
float32x4_t _r3_8101214 = _r30nx2.val[0];
float32x4_t _r3_9111315 = _r30nx2.val[1];
float32x4_t _r30 = _r30_02461357.val[0];
float32x4_t _r31 = _r30_02461357.val[1];
float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1);
float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1);
float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2);
float32x4_t _r35 = vextq_f32(_r31, _r3_9111315, 2);
float32x4_t _r36 = vextq_f32(_r30, _r3_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
_sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
_sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
_sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
_sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
_sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
_sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);
float32x4x2_t _r40_02461357 = vld2q_f32(r4);
float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8);
float32x4_t _r4_8101214 = _r40nx2.val[0];
float32x4_t _r4_9111315 = _r40nx2.val[1];
float32x4_t _r40 = _r40_02461357.val[0];
float32x4_t _r41 = _r40_02461357.val[1];
float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1);
float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1);
float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2);
float32x4_t _r45 = vextq_f32(_r41, _r4_9111315, 2);
float32x4_t _r46 = vextq_f32(_r40, _r4_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
_sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
_sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
_sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
_sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
_sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
_sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);
float32x4x2_t _r50_02461357 = vld2q_f32(r5);
float32x4x2_t _r50nx2 = vld2q_f32(r5 + 8);
float32x4_t _r5_8101214 = _r50nx2.val[0];
float32x4_t _r5_9111315 = _r50nx2.val[1];
float32x4_t _r50 = _r50_02461357.val[0];
float32x4_t _r51 = _r50_02461357.val[1];
float32x4_t _r52 = vextq_f32(_r50, _r5_8101214, 1);
float32x4_t _r53 = vextq_f32(_r51, _r5_9111315, 1);
float32x4_t _r54 = vextq_f32(_r50, _r5_8101214, 2);
float32x4_t _r55 = vextq_f32(_r51, _r5_9111315, 2);
float32x4_t _r56 = vextq_f32(_r50, _r5_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
_sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
_sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
_sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
_sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
_sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
_sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);
float32x4x2_t _r60_02461357 = vld2q_f32(r6);
float32x4x2_t _r60nx2 = vld2q_f32(r6 + 8);
float32x4_t _r6_8101214 = _r60nx2.val[0];
float32x4_t _r6_9111315 = _r60nx2.val[1];
float32x4_t _r60 = _r60_02461357.val[0];
float32x4_t _r61 = _r60_02461357.val[1];
float32x4_t _r62 = vextq_f32(_r60, _r6_8101214, 1);
float32x4_t _r63 = vextq_f32(_r61, _r6_9111315, 1);
float32x4_t _r64 = vextq_f32(_r60, _r6_8101214, 2);
float32x4_t _r65 = vextq_f32(_r61, _r6_9111315, 2);
float32x4_t _r66 = vextq_f32(_r60, _r6_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
_sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
_sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
_sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
_sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
_sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
_sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);
vst1q_f32(outptr, _sum);
// Stride 2, 4 outputs per iteration -> input advances 8 floats.
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
r5 += 8;
r6 += 8;
outptr += 4;
}
#endif // __clang__
#else
// armv7 path: three rotating accumulators q13..q15 reduce FMA latency
// stalls; k0 is walked forward 28 bytes (7 floats) per kernel row and
// rewound by 168 bytes (6 rows) at the end of each loop iteration.
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d26-d27}, [%1] \n"// _sum
// "veor q14, q14 \n"// _sum2 = 0;
// "veor q15, q15 \n"// _sum3 = 0;
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
"add %9, #28 \n"
"pld [%2, #512] \n"
"vld2.f32 {d0-d3}, [%2]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7
"vmla.f32 q13, q0, d8[0] \n"
"vmul.f32 q14, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%2] \n"// q2 = 8 10 12 14 q3 = 9 11 13 15
"vext.32 q8, q0, q2, #1 \n"// q8 = 2 4 6 8
"vext.32 q9, q1, q3, #1 \n"// q9 = 3 5 7 9
"vmul.f32 q15, q8, d9[0] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"// q10= 4 6 8 10
"vext.32 q11, q1, q3, #2 \n"// q11= 5 7 9 11
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"// q12= 6 8 10 12
"vmla.f32 q13, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
"add %9, #28 \n"
"pld [%3, #512] \n"
"vld2.f32 {d0-d3}, [%3]! \n"
"vmla.f32 q14, q0, d12[0] \n"
"vmla.f32 q15, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%3] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q13, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q14, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
"add %9, #28 \n"
"pld [%4, #512] \n"
"vld2.f32 {d0-d3}, [%4]! \n"
"vmla.f32 q15, q0, d8[0] \n"
"vmla.f32 q13, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%4] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q15, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
"add %9, #28 \n"
"pld [%5, #512] \n"
"vld2.f32 {d0-d3}, [%5]! \n"
"vmla.f32 q13, q0, d12[0] \n"
"vmla.f32 q14, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%5] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q13, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
"add %9, #28 \n"
"pld [%6, #512] \n"
"vld2.f32 {d0-d3}, [%6]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"vmla.f32 q15, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%6] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q14, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
"add %9, #28 \n"
"pld [%7, #512] \n"
"vld2.f32 {d0-d3}, [%7]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"vmla.f32 q13, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%7] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q13, q10, d14[0] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q15, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
"sub %9, #168 \n"// restore k0
"pld [%8, #512] \n"
"vld2.f32 {d0-d3}, [%8]! \n"
"vmla.f32 q13, q0, d8[0] \n"
"vmla.f32 q14, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%8] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q13, q12, d11[0] \n"
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q13, q13, q14 \n"
"vst1.f32 {d26-d27}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6), // %8
"=r"(k0) // %9
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"9"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar tail: one output at a time, full 7x7 dot product; the input
// pointers advance by 2 because of the stride.
for (; remain>0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r0[5] * k0[5];
sum += r0[6] * k0[6];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r1[5] * k1[5];
sum += r1[6] * k1[6];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r2[5] * k2[5];
sum += r2[6] * k2[6];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r3[5] * k3[5];
sum += r3[6] * k3[6];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum += r4[5] * k4[5];
sum += r4[6] * k4[6];
sum += r5[0] * k5[0];
sum += r5[1] * k5[1];
sum += r5[2] * k5[2];
sum += r5[3] * k5[3];
sum += r5[4] * k5[4];
sum += r5[5] * k5[5];
sum += r5[6] * k5[6];
sum += r6[0] * k6[0];
sum += r6[1] * k6[1];
sum += r6[2] * k6[2];
sum += r6[3] * k6[3];
sum += r6[4] * k6[4];
sum += r6[5] * k6[5];
sum += r6[6] * k6[6];
// Accumulate onto the existing output (bias + previous input channels).
*outptr += sum;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
r5 += 2;
r6 += 2;
outptr++;
}
// Move every row pointer to the start of the next pair of input rows.
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
}
}
}
}
|
omp_hello.c | /* a simple openMP program */
#include <stdio.h>
#include <omp.h>
int main(int argc, char *argv[]){
    /* Team size, published by the master thread inside the parallel
     * region.  The sentinel initializer is never observed: the explicit
     * barrier below orders the master's write before every read. */
    int num_threads = 999999;
    // omp_set_num_threads(4);
    // or use num_threads(4) as part of the pragma below
#pragma omp parallel
    {
        const int thread_id = omp_get_thread_num();
        /* Only the master thread records the team size... */
#pragma omp master
        num_threads = omp_get_num_threads();
        /* ...and everyone waits for it: the master construct has no
         * implied barrier, so this explicit one is required. */
#pragma omp barrier
        printf("Hello from thread %d nthread %d\n", thread_id, num_threads);
    }
    return 0;
}
|
expected_output.c | #include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <polybench.h>
#include "mvt.h"
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/*mvt.c: this file is part of PolyBench/C*/
/*Include polybench common header.*/
/*Include benchmark-specific header.*/
/*Array initialization.*/
static void init_array(int n, double x1[2000], double x2[2000], double y_1[2000], double y_2[2000], double A[2000][2000]) {
int i, j;
for(i = 0; i < n; i++) {
x1[i] = (double) (i % n) / n;
x2[i] = (double) ((i + 1) % n) / n;
y_1[i] = (double) ((i + 3) % n) / n;
y_2[i] = (double) ((i + 4) % n) / n;
for(j = 0; j < n; j++)
A[i][j] = (double) (i * j % n) / n;
}
}
/*DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.*/
/* Dump x1 and x2 to stderr in the PolyBench dump format (20 values per
 * line).  Doubles as the DCE barrier: the timed kernel's live-out data
 * must be consumed somewhere, and comparing this dump checks results. */
static void print_array(int n, double x1[2000], double x2[2000]) {
    const double *vecs[2] = { x1, x2 };
    const char *names[2] = { "x1", "x2" };
    fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");
    for (int v = 0; v < 2; v++) {
        fprintf(stderr, "begin dump: %s", names[v]);
        for (int i = 0; i < n; i++) {
            if (i % 20 == 0) fprintf(stderr, "\n");
            fprintf(stderr, "%0.2lf ", vecs[v][i]);
        }
        fprintf(stderr, "\nend dump: %s\n", names[v]);
    }
    fprintf(stderr, "==END DUMP_ARRAYS==\n");
}
/*Main computational kernel. The whole function will be timed,
including the call and return.*/
static void kernel_mvt(int n, double x1[2000], double x2[2000], double y_1[2000], double y_2[2000], double A[2000][2000]) {
int i, j;
#pragma omp parallel for default(shared) private(i, j) firstprivate(n, A, y_1)
for(i = 0; i < n; i++) {
// #pragma omp parallel for default(shared) private(j) firstprivate(n, i, A, y_1) reduction(+ : x1[i])
for(j = 0; j < n; j++)
x1[i] = x1[i] + A[i][j] * y_1[j];
}
#pragma omp parallel for default(shared) private(i, j) firstprivate(n, A, y_2)
for(i = 0; i < n; i++) {
// #pragma omp parallel for default(shared) private(j) firstprivate(n, i, A, y_2) reduction(+ : x2[i])
for(j = 0; j < n; j++)
x2[i] = x2[i] + A[j][i] * y_2[j];
}
}
int main(int argc, char **argv) {
    /* Problem size — fixed at 2000 for this PolyBench variant. */
    int n = 2000;
    /* Heap-allocate every operand through the PolyBench allocator so the
     * ~32 MB working set does not live on the stack. */
    double (*A)[2000][2000] =
        (double (*)[2000][2000]) polybench_alloc_data((2000 + 0) * (2000 + 0), sizeof(double));
    double (*x1)[2000] =
        (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double));
    double (*x2)[2000] =
        (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double));
    double (*y_1)[2000] =
        (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double));
    double (*y_2)[2000] =
        (double (*)[2000]) polybench_alloc_data(2000 + 0, sizeof(double));
    /* Initialize, then run the timed kernel. */
    init_array(n, *x1, *x2, *y_1, *y_2, *A);
    kernel_mvt(n, *x1, *x2, *y_1, *y_2, *A);
    /* Consume the live-out data so the kernel cannot be dead-code
     * eliminated; the guard is intentionally almost never true. */
    if (argc > 42 && !strcmp(argv[0], "")) print_array(n, *x1, *x2);
    /* Release everything. */
    free((void *) y_2);
    free((void *) y_1);
    free((void *) x2);
    free((void *) x1);
    free((void *) A);
    return 0;
}
|
pfmg_setup_rap7.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.26 $
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
#include "pfmg.h"
/*--------------------------------------------------------------------------
 * Macro to "change coordinates".  The setup routines below are written as
 * though coarsening is done in the z-direction; this macro permutes an
 * index so that coarsening can also be done in the x- or y-direction.
 * Note: cdir is incremented three times (mod 3), so its value is
 * unchanged after the macro completes.
 *--------------------------------------------------------------------------*/
#define MapIndex(in_index, cdir, out_index) \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 2); \
cdir = (cdir + 1) % 3; \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 0); \
cdir = (cdir + 1) % 3; \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 1); \
cdir = (cdir + 1) % 3;
/*--------------------------------------------------------------------------
 * hypre_PFMGCreateCoarseOp7
 *
 * Creates the structure of a new coarse-grid operator.  The fine-grid
 * operator is 7-point and the coarse operator is kept 7-point as well
 * (non-Galerkin).  Only A's symmetry flag is consulted here; R and P are
 * part of the common CreateCoarseOp interface.
 *--------------------------------------------------------------------------*/
hypre_StructMatrix *
hypre_PFMGCreateCoarseOp7( hypre_StructMatrix *R,
                           hypre_StructMatrix *A,
                           hypre_StructMatrix *P,
                           hypre_StructGrid   *coarse_grid,
                           HYPRE_Int           cdir        )
{
   hypre_StructMatrix  *RAP;
   hypre_StructStencil *RAP_stencil;
   hypre_Index         *RAP_stencil_shape;
   HYPRE_Int            RAP_stencil_size;
   HYPRE_Int            RAP_stencil_dim = 3;
   HYPRE_Int            RAP_num_ghost[] = {1, 1, 1, 1, 1, 1};
   hypre_Index          index_temp;
   HYPRE_Int            zi, yi, xi;
   HYPRE_Int            rank = 0;

   /*-----------------------------------------------------------------------
    * Define the RAP stencil shape
    *-----------------------------------------------------------------------*/
   if (!hypre_StructMatrixSymmetric(A))
   {
      /*--------------------------------------------------------------------
       * Non-symmetric case: store all 7 entries (c,w,e,n,s,a,b).
       *--------------------------------------------------------------------*/
      RAP_stencil_size  = 7;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (zi = -1; zi <= 1; zi++)
      {
         for (yi = -1; yi <= 1; yi++)
         {
            for (xi = -1; xi <= 1; xi++)
            {
               /* keep only offsets with at most one nonzero coordinate */
               if (xi*yi == 0 && xi*zi == 0 && yi*zi == 0)
               {
                  hypre_SetIndex(index_temp, xi, yi, zi);
                  MapIndex(index_temp, cdir, RAP_stencil_shape[rank]);
                  rank++;
               }
            }
         }
      }
   }
   else
   {
      /*--------------------------------------------------------------------
       * Symmetric case: store only the lower triangular part plus the
       * diagonal ("lower" in the standard lexicographic ordering), i.e.
       * the 4 entries (c,w,s,b).
       *--------------------------------------------------------------------*/
      RAP_stencil_size  = 4;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (zi = -1; zi <= 0; zi++)
      {
         for (yi = -1; yi <= 0; yi++)
         {
            for (xi = -1; xi <= 0; xi++)
            {
               /* keep only offsets with at most one nonzero coordinate */
               if (xi*yi == 0 && xi*zi == 0 && yi*zi == 0)
               {
                  hypre_SetIndex(index_temp, xi, yi, zi);
                  MapIndex(index_temp, cdir, RAP_stencil_shape[rank]);
                  rank++;
               }
            }
         }
      }
   }

   RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim, RAP_stencil_size,
                                           RAP_stencil_shape);
   RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
                                  coarse_grid, RAP_stencil);
   hypre_StructStencilDestroy(RAP_stencil);

   /*-----------------------------------------------------------------------
    * The coarse operator is symmetric iff the fine operator is.
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);

   /*-----------------------------------------------------------------------
    * Set number of ghost points - one on each boundary
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);

   return RAP;
}
/*--------------------------------------------------------------------------
 * hypre_PFMGBuildCoarseOp7
 * Sets up new coarse grid operator structure. Fine grid operator is 7pt and
 * so is coarse, i.e. non-Galerkin.
 *
 * Uses the non-Galerkin strategy from Ashby & Falgout's original ParFlow
 * algorithm. For constant_coefficient==2, see [issue663].
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PFMGBuildCoarseOp7( hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *fgrid;
hypre_BoxArray *fgrid_boxes;
hypre_Box *fgrid_box;
HYPRE_Int *fgrid_ids;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
HYPRE_Int *cgrid_ids;
hypre_IndexRef cstart, bfstart, stridef;
hypre_Index fstart, bcstart, stridec;
hypre_Index loop_size;
HYPRE_Int constant_coefficient;
HYPRE_Int fi, ci, fbi;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *RAP_dbox;
hypre_BoxArray *bdy_boxes, *tmp_boxes;
hypre_Box *bdy_box, *fcbox;
double *pb, *pa;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn, *a_cb, *a_ca;
double *rap_cc, *rap_cw, *rap_ce, *rap_cs, *rap_cn;
double *rap_cb, *rap_ca;
double west, east, south, north;
double center_int, center_bdy;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iPm1, iPp1;
HYPRE_Int OffsetA;
HYPRE_Int OffsetP;
/* Fine-grid boxes are swept with the coarsening stride, coarse-grid
boxes with unit stride, so corresponding points stay aligned. */
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
fgrid = hypre_StructMatrixGrid(A);
fgrid_boxes = hypre_StructGridBoxes(fgrid);
fgrid_ids = hypre_StructGridIDs(fgrid);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
/* Coefficient mode: 0 = fully variable; 1 = fully constant; 2 = constant
off-diagonal with a per-point diagonal (see the branches below). A must
match RAP; for modes 1 and 2, R and P must be constant-coefficient. */
constant_coefficient = hypre_StructMatrixConstantCoefficient(RAP);
hypre_assert( hypre_StructMatrixConstantCoefficient(A) == constant_coefficient );
if ( constant_coefficient==0 )
{
hypre_assert( hypre_StructMatrixConstantCoefficient(R) == 0 );
hypre_assert( hypre_StructMatrixConstantCoefficient(P) == 0 );
}
else /* 1 or 2 */
{
hypre_assert( hypre_StructMatrixConstantCoefficient(R) == 1 );
hypre_assert( hypre_StructMatrixConstantCoefficient(P) == 1 );
}
fcbox = hypre_BoxCreate();
bdy_boxes = hypre_BoxArrayCreate(0);
tmp_boxes = hypre_BoxArrayCreate(0);
/* Pair each coarse box with the fine box carrying the same id; fi only
advances, so both id arrays are assumed ordered consistently. */
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
fgrid_box = hypre_BoxArrayBox(fgrid_boxes, fi);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pb is pointer for weight for f-point below c-point
 * pa is pointer for weight for f-point above c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* shift pb so that pb[iP] indexes the weight of the f-point above */
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 7-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient
 * a_ce is pointer for east coefficient
 * a_cs is pointer for south coefficient
 * a_cn is pointer for north coefficient
 * a_cb is pointer for below coefficient
 * a_ca is pointer for above coefficient
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
a_cb = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ca = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for coarse grid operator
 * rap_cc is pointer for center coefficient (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
rap_cb = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rap_ca = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in referring to data associated with other points.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
OffsetP = hypre_BoxOffsetDistance(P_dbox,index);
OffsetA = hypre_BoxOffsetDistance(A_dbox,index);
/*--------------------------------------------------------------
 * Loop for symmetric 7-point fine grid operator; produces a
 * symmetric 7-point coarse grid operator.
 *--------------------------------------------------------------*/
if ( constant_coefficient==0 )
{
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iA,iAc,iAm1,iAp1,iPm1,iPp1,west,east,south,north) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(iP, iA, iAc)
{
iAm1 = iA - OffsetA;
iAp1 = iA + OffsetA;
iPm1 = iP - OffsetP;
iPp1 = iP + OffsetP;
/* below/above entries: fine coefficient times interpolation weight */
rap_cb[iAc] = a_cb[iA] * pa[iPm1];
rap_ca[iAc] = a_ca[iA] * pb[iPp1];
/* lateral entries: average of this point and its below/above
neighbors in the coarsened direction (weights 1, 1/2, 1/2) */
west = a_cw[iA] + 0.5 * a_cw[iAm1] + 0.5 * a_cw[iAp1];
east = a_ce[iA] + 0.5 * a_ce[iAm1] + 0.5 * a_ce[iAp1];
south = a_cs[iA] + 0.5 * a_cs[iAm1] + 0.5 * a_cs[iAp1];
north = a_cn[iA] + 0.5 * a_cn[iAm1] + 0.5 * a_cn[iAp1];
/*-----------------------------------------------------
 * Prevent non-zero entries reaching off grid
 *-----------------------------------------------------*/
if(a_cw[iA] == 0.0) west = 0.0;
if(a_ce[iA] == 0.0) east = 0.0;
if(a_cs[iA] == 0.0) south = 0.0;
if(a_cn[iA] == 0.0) north = 0.0;
rap_cw[iAc] = west;
rap_ce[iAc] = east;
rap_cs[iAc] = south;
rap_cn[iAc] = north;
/* center entry preserves the row sum of the averaged stencil */
rap_cc[iAc] = a_cc[iA]
+ a_cw[iA] + a_ce[iA] + a_cs[iA] + a_cn[iA]
+ a_cb[iA] * pb[iP] + a_ca[iA] * pa[iP]
- west - east - south - north;
}
hypre_BoxLoop3End(iP, iA, iAc);
}
else if ( constant_coefficient==1 )
{
/* Fully constant coefficients: one value per stencil entry. Note
pa[0] is used for both the below and above terms, i.e. this closed
form relies on the two interpolation weights being equal. */
rap_cb[0] = rap_ca[0] = a_cb[0] * pa[0];
rap_cw[0] = rap_ce[0] = 2.0*a_cw[0];
rap_cs[0] = rap_cn[0] = 2.0*a_cs[0];
rap_cc[0] = a_cc[0] - 2.0*( a_cw[0] + a_cs[0] - rap_cb[0] );
}
else if ( constant_coefficient==2 )
{
/* NOTE: This does not reduce to either of the above operators unless
 * the row sum is zero and the interpolation weights are 1/2 */
rap_cb[0] = rap_ca[0] = 0.5*a_cb[0];
rap_cw[0] = rap_ce[0] = 2.0*a_cw[0];
rap_cs[0] = rap_cn[0] = 2.0*a_cs[0];
center_int = 3.0*a_cb[0];
center_bdy = 0.5*a_cb[0] + (a_cw[0] + a_cs[0] + a_cb[0]);
/* variable diagonal: interior closed form first ... */
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(iA, iAc)
{
rap_cc[iAc] = 2.0*a_cc[iA] + center_int;
}
hypre_BoxLoop2End(iA, iAc);
/* ... then correct coarse points whose fine counterparts touch the
grid boundary in the coarsened direction.  NOTE(review): the
correction constants are taken on faith from [issue663]. */
hypre_CopyBox(cgrid_box, fcbox);
hypre_StructMapCoarseToFine(hypre_BoxIMin(fcbox), cindex, cstride,
hypre_BoxIMin(fcbox));
hypre_StructMapCoarseToFine(hypre_BoxIMax(fcbox), cindex, cstride,
hypre_BoxIMax(fcbox));
hypre_BoxArraySetSize(bdy_boxes, 0);
if (hypre_BoxIMinD(fcbox, cdir) == hypre_BoxIMinD(fgrid_box, cdir))
{
hypre_BoxBoundaryIntersect(fcbox, fgrid, cdir, -1, bdy_boxes);
}
if (hypre_BoxIMaxD(fcbox, cdir) == hypre_BoxIMaxD(fgrid_box, cdir))
{
hypre_BoxBoundaryIntersect(fcbox, fgrid, cdir, 1, tmp_boxes);
hypre_AppendBoxArray(tmp_boxes, bdy_boxes);
}
hypre_ForBoxI(fbi, bdy_boxes)
{
bdy_box = hypre_BoxArrayBox(bdy_boxes, fbi);
hypre_BoxGetSize(bdy_box, loop_size);
bfstart = hypre_BoxIMin(bdy_box);
hypre_StructMapFineToCoarse(bfstart, cindex, cstride, bcstart);
hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
A_dbox, bfstart, stridef, iA,
RAP_dbox, bcstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(iA, iAc)
{
rap_cc[iAc] -= 0.5*a_cc[iA] + center_bdy;
}
hypre_BoxLoop2End(iA, iAc);
}
}
} /* end ForBoxI */
hypre_BoxDestroy(fcbox);
hypre_BoxArrayDestroy(bdy_boxes);
hypre_BoxArrayDestroy(tmp_boxes);
return hypre_error_flag;
}
|
d2p.c |
/////////////////////////// 8INF854 - ARCHITECTURES PARRALLELES - DEVOIR #2 ///////////////////////////////////
///////////////////////////// d2p.c - Corentin RAOULT - Adrien Cambillau /////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
////////////////////// déclaration des fonctions /////////////////////////
int* remplirTAB(int n);
void afficherTAB(int* TAB, int n);
void triFusionParallele(int * TAB, int n);
void fusion(int * U, int taille_U, int * V, int taille_V , int * T);
void fonctionTriFusionParallele(int n);
///////////////////// MAIN ////////////////////////////////////////////////
int main(int argc, char* argv[])
{
    /* The problem size may be passed on the command line; otherwise the
       user is prompted for it (the sorted array will hold 2*n elements). */
    if (argc == 2)
    {
        fonctionTriFusionParallele(atoi(argv[1]));
    }
    else
    {
        int n = 2;
        printf("entrez un entier n (il sera multiplié 2)\n");
        scanf("%d",&n);
        fonctionTriFusionParallele(n);
    }
    return EXIT_SUCCESS;
}
/////////////////// développement des fonctions /////////////////////////////////
void fonctionTriFusionParallele(int n)
{
    /* Build the 2n-element test array, sort it inside a parallel region,
       and print the elapsed wall-clock time. */
    int *T = remplirTAB(n);
    double debut, fin;

    debut = omp_get_wtime(); /* NOTE: timer read on the calling thread only */
    #pragma omp parallel
    {
        triFusionParallele(T, 2*n);
    }
    fin = omp_get_wtime();

    printf("\t %lf \n", fin - debut);
    free(T);
}
int* remplirTAB(int n)
{
    /* Allocate a 2n-element array: the even values 0,2,4,... fill the
       first half in order, the odd values 1,3,5,... fill the second
       half.  Caller owns (and must free) the returned buffer. */
    int *T = (int *) malloc(2 * n * sizeof(int));
    int lo = 0;      /* next slot in the first half  */
    int hi = n;      /* next slot in the second half */
    int v;

    for (v = 0; v < 2 * n; v++)
    {
        if (v % 2 == 0)
            T[lo++] = v;
        else
            T[hi++] = v;
    }
    return T;
}
void afficherTAB(int* TAB, int n)
{
    /* Debug helper: print the n entries of TAB as "TAB : {  [a]  [b] ... }". */
    int idx;

    printf("TAB : { ");
    for (idx = 0; idx < n; idx++)
        printf(" [%d] ", TAB[idx]);
    printf(" }\n");
}
void triFusionParallele(int * TAB, int n)
{
    /* Parallel merge sort of TAB[0..n-1] using OpenMP tasks.
     *
     * Bug fix: the original sized BOTH halves as n/2, so for odd n the
     * last element of TAB was never copied into a half nor written back
     * by fusion() — it was silently dropped.  Odd sizes are reachable:
     * the driver sorts 2*k elements, and recursion halves that into odd
     * sub-sizes whenever k is not a power of two.  The right half now
     * receives the remaining n - n/2 elements.
     *
     * Also returns early for n < 2 instead of allocating and copying
     * zero-length buffers (observable behavior is unchanged).
     */
    int taille_U = n / 2;       /* left half   */
    int taille_V = n - n / 2;   /* right half (gets the extra element) */
    int i;
    int *U, *V;

    if (n < 2) /* 0 or 1 element: already sorted */
        return;

    U = (int *) malloc(taille_U * sizeof(int));
    V = (int *) malloc(taille_V * sizeof(int));
    for (i = 0; i < taille_U; i++)
        U[i] = TAB[i];
    for (i = 0; i < taille_V; i++)
        V[i] = TAB[i + taille_U];

    #pragma omp single nowait
    {
        #pragma omp task
        triFusionParallele(U, taille_U);
        #pragma omp task
        triFusionParallele(V, taille_V);
        #pragma omp taskwait
        fusion(U, taille_U, V, taille_V, TAB);
    }
    free(U);
    free(V);
}
void fusion(int * U, int taille_U, int * V, int taille_V , int * T)
{
    /* Merge the two sorted runs U[0..taille_U-1] and V[0..taille_V-1]
     * into T[0..taille_U+taille_V-1].
     *
     * Bug fix: the original evaluated U[i] < V[j] BEFORE checking any
     * bound, reading past the end of U once it was exhausted, and it
     * never bounded j at all, reading past the end of V.  The bounds
     * are now checked first: take from U while it has elements and its
     * head is <= V's head (or V is exhausted); otherwise take from V.
     */
    int i = 0, j = 0;
    int k;

    for (k = 0; k < taille_U + taille_V; k++)
    {
        if (j >= taille_V || (i < taille_U && U[i] <= V[j]))
            T[k] = U[i++];
        else
            T[k] = V[j++];
    }
}
|
enpass_fmt_plug.c | /* JtR format to crack Enpass Password Manager databases.
*
* This software is Copyright (c) 2017, Dhiru Kholia <dhiru at openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_enpass;
#elif FMT_REGISTERS_H
john_register_one(&fmt_enpass);
#else
#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include "aes.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "enpass_common.h"
#include "pbkdf2_hmac_sha1.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4 // this is a slow format, so 4 should be enough
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "enpass"
#define FORMAT_NAME "Enpass Password Manager"
#define FORMAT_TAG "$enpass$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN 1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define FILE_HEADER_SZ 16
#define SQLITE_FILE_HEADER "SQLite format 3"
#define SQLITE_MAX_PAGE_SIZE 65536
static struct fmt_tests enpass_tests[] = {
{"$enpass$0$24000$700dfb6d83ae3b4b87935ed8246123363656de4273979a1365197a632c6b1ce68ca801d0bb50d93c9a0509fbb061bba2ad579ed0d48ee781508c853b9bd042d3275cc92781770a211ecd08a254db873e50664a14b394d63e3e443a82d69c7df84c592a60b5b620e241c9675f097f931093f6ebf67f56e5db0d82eb61ff9da3636bf7c79598e6ee1f34b7abd2b1e5e3ae9e9a219de50d9c079fb7fb21910139468619c6ac562a4157c0e8e85df08b54aff33ec2005e2214549ba04d794882051e8e245f63f822d469c6588ccd38c02154f21cdfd06acd5ed1b97cbe7e23648ce70c471560222cd8927b0567cd0a3c317b7a8add994dc8fcda89ae4afc33c1260192e3c8c3ca9d50347a91a82025c1cb127aede8334286cc26f86591d34483b90d86d1e1372f74d1b7eee5aa233ed9199a3de01e7d16b092b4c902a602a16edcf03005596abc5c24f249dbb48236dc27738e93949c383734f6e39bf199fcd3fd22ab9268d1678d7259f94ab2c012e924ff2d26772ebf2cccc0ffe795264cd7a035f52f258b5ce78b7f1353c120f1aa30cbe943832fa70d3762222365109521c1a70a7ace321ddda173fb731c1d6f65c8e4af8f7b62660bc70a2c9ece21f8cddbe65d047f92aa6ca55a90864cb12c757030a7755ec4601a6f28dc2e728ee3f84fc1d39c261c845335a9d19e3356192b257186ff606756e58df67c11d2886870c90b69f5b51630f72d79f51884528214e9987865debb6b23ce8deecfb67cd43450a73675b53fcd20b6ae1da13f69dd349045d0b9b7dded042020ad081143231c79778d01f91c6e6df823885860ea781dd07867222b438599d02a815a4c18409c5e97a3d8e870ce1401bce7c556f05ac77af2659ef9b13d0d4df32a54674ef451cc2ffef50d4ca31efe19644db389ae9f0ce97686e5e53f1d82b98136258708911641b3a251eea41e6433534eb2810df49e040901367ee42b12cf7f853bab46f5360da2429989d232c9f6897e44221a2a5e946563db10423cfb073b6abf1e977f746e1d9c0fb929bb0e2c9dd50c11c76e0219a0004aa747de0db075305d4582293727f16f215403a9ca3d99af1750343101162954daebd58358b21276346519b2c05942223ad8314073900169b222b0e24f79c76dc61b4701edba670bc07bd4fa3c5a2179c69560f23ed925594f3ca230ed780904e82c7f8f6ee737c059d1af79eef0c1f8e6a0fdace62e87d88ad3b345afb96ea7b26eb0426585ea064933c8b8ec9264d910dc1573363dbec0755de36221eb368c5b2703c254a4d3d29d1b247c46200f743fe5f04f4b8fec2f143ba1276cc4b2bd7802bfe6fa63a49eb7a77f3443db74e0c889441fc2154d85bdbc0bbdc80eca3852ff8c7d7738ff9ba9eaa18174
f4f65c526940289717bb87d05fd4eeef1272065b4bfa4d6f31a1b23c50e1355988", "openwall"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static struct custom_salt *cur_salt;
static void init(struct fmt_main *self)
{
	/* Size the per-run key and result buffers; with OpenMP the batch is
	   scaled by the thread count (min first, then OMP_SCALE for max). */
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key),
	                       self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked),
	                     self->params.max_keys_per_crypt);
}
static void done(void)
{
/* Release the buffers allocated in init(), in reverse order. */
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
static void set_salt(void *salt)
{
/* Remember the salt for the next crypt_all() batch (no copy is made). */
cur_salt = (struct custom_salt *)salt;
}
/* For each candidate password: derive a 32-byte AES key with
PBKDF2-HMAC-SHA1 (16-byte salt, cur_salt->iterations rounds), decrypt a
small piece of the first database page in CBC mode, and mark the
candidate cracked if the plaintext passes enpass_common_verify_page().
Batches of MAX_KEYS_PER_CRYPT are processed per loop step so the SIMD
PBKDF2 path can compute several keys at once. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
unsigned char master[MAX_KEYS_PER_CRYPT][32];
unsigned char output[24];
unsigned char *iv_in;
unsigned char iv_out[16];
int size, i;
AES_KEY akey;
#ifdef SIMD_COEF_32
int len[MAX_KEYS_PER_CRYPT];
unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
len[i] = strlen(saved_key[i+index]);
pin[i] = (unsigned char*)saved_key[i+index];
pout[i] = master[i];
}
pbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt, 16, cur_salt->iterations, pout, 32, 0);
#else
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
pbkdf2_sha1((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]),
cur_salt->salt, 16, cur_salt->iterations, master[i], 32, 0);
#endif
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
// memcpy(output, SQLITE_FILE_HEADER, FILE_HEADER_SZ);
// See "sqlcipher_page_cipher" and "sqlite3Codec" functions
/* page_sz / reserve_sz are file-scope values from enpass_common --
presumably set while parsing the hash; TODO confirm. The IV lives
in the page's reserved tail region. */
size = page_sz - reserve_sz;
iv_in = cur_salt->data + 16 + size; // initial 16 bytes are salt
memcpy(iv_out, iv_in, 16);
AES_set_decrypt_key(master[i], 256, &akey);
/*
* decrypting 8 bytes from offset 16 is enough since the
* verify_page function looks at output[16..23] only.
*/
AES_cbc_encrypt(cur_salt->data + 16, output + 16, 8, &akey, iv_out, AES_DECRYPT);
if (enpass_common_verify_page(output) == 0)
cracked[index+i] = 1;
else
cracked[index+i] = 0;
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
	int i;

	/* Report whether any candidate in this batch was cracked. */
	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
static int cmp_one(void *binary, int index)
{
/* crypt_all() already did the full verification; just report the flag. */
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
/* Nothing further to check beyond crypt_all()'s page verification. */
return 1;
}
static void enpass_set_key(char *key, int index)
{
	/* Store the candidate, truncated to PLAINTEXT_LENGTH bytes and
	   NUL-terminated. */
	int len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}
static char *get_key(int index)
{
/* Return the stored plaintext candidate for this slot. */
return saved_key[index];
}
/* Format registration: parameters and method table (field order is fixed
by struct fmt_main in formats.h). */
struct fmt_main fmt_enpass = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ FORMAT_TAG },
enpass_tests
}, {
/* methods: defaults are used wherever this format needs no
special handling; valid/get_salt come from enpass_common */
init,
done,
fmt_default_reset,
fmt_default_prepare,
enpass_common_valid,
fmt_default_split,
fmt_default_binary,
enpass_common_get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
enpass_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
SybaseASE_fmt_plug.c | /*
* Unicode conversion enhancements by magnum, 2011. Licensed as below.
*
* Sybase ASE hash support for version 15.0.2 and above, based on hmailserver
* patch by James Nobis.
* Hash format description : http://marcellmajor.com/sybase_sha256.html
* Hacked together by Dhiru Kholia in February, 2011.
*
* This patch Copyright (C) 2010 by James Nobis - quel
* quel NOSPAM quelrod NOSPAM net, and it is herby released to the general
* public under the follow terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* Inspiration from the generic sha-1 and md5 (Copyright (c) 2010 by Solar Designer)
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_SybaseASE;
#elif FMT_REGISTERS_H
john_register_one(&fmt_SybaseASE);
#else
#include "arch.h"
//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA256
//
//#define FORCE_GENERIC_SHA2 2
#include "sha2.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "SybaseASE"
#define FORMAT_NAME "Sybase ASE"
#define ALGORITHM_NAME "SHA256 " SHA256_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH (6 + 16 + 64)
#define PREFIX_LENGTH 6
#define BINARY_SIZE 32
#define BINARY_ALIGN 4
#define SALT_SIZE 8
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 512
#endif
#endif // __MIC__
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#endif
static struct fmt_tests SybaseASE_tests[] = {
{"0xc0074f9cc8c0d55d9803b0c0816e127f2a56ee080230af5b4ce3da1f3d9fcc5449fcfcf3fb9595eb8ea6", "test12"},
{"0xc0074BE393C06BE420AD541671aa5e6f1a19a4a73bb51c59f45790f0887cfb70e0599747c6844d4556b3", "a"},
{NULL}
};
#ifdef SIMD_COEF_32
// note, elements 3-7 are 'nulls', and are not in this array.
static UTF16 (*prep_key)[4][MAX_KEYS_PER_CRYPT][64 / sizeof(UTF16)];
static unsigned char *NULL_LIMB;
static int (*last_len);
static ARCH_WORD_32 (*crypt_cache)[BINARY_SIZE/4];
#else
static UTF16 (*prep_key)[518 / sizeof(UTF16)];
static SHA256_CTX (*prep_ctx);
#endif
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE/4];
static int kpc, dirty;
extern struct fmt_main fmt_SybaseASE;
static void init(struct fmt_main *self)
{
	/* Allocate per-run buffers and, for the SIMD build, pre-fill the
	   constant SHA-256 padding of the 518-byte (key||salt) message. */
#if _OPENMP || SIMD_COEF_32
	int i;
#endif
#ifdef _OPENMP
	i = omp_get_max_threads();
	self->params.min_keys_per_crypt *= i;
	i *= OMP_SCALE;
	self->params.max_keys_per_crypt *= i;
#endif
	kpc = self->params.max_keys_per_crypt;
	prep_key = mem_calloc_align(sizeof(*prep_key),
	                            self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
	crypt_out = mem_calloc_align(sizeof(*crypt_out),
	                             self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
	if (options.target_enc == UTF_8)
		fmt_SybaseASE.params.plaintext_length = 125;
	/* will simply set SIMD stuff here, even if not 'used' */
#ifdef SIMD_COEF_32
	NULL_LIMB = mem_calloc_align(64, MAX_KEYS_PER_CRYPT, MEM_ALIGN_CACHE);
	last_len = mem_calloc_align(sizeof(*last_len), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	for (i = 0; i < kpc/MAX_KEYS_PER_CRYPT; ++i) {
		int j;
		for (j = 0; j < MAX_KEYS_PER_CRYPT; ++j) {
			/* last limb: 0x80 terminator right after the 6 salt
			   bytes, and the total message length (518 bytes) in
			   bits in the SHA-256 length field */
			prep_key[i][3][j][3] = 0x80;
			prep_key[i][3][j][30] = 518<<3;
		}
	}
	crypt_cache = mem_calloc_align(sizeof(*crypt_cache),
	                               self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
#else
	/* Bug fix: this allocation used sizeof(*prep_key) (the 518-byte
	   keytext buffer) instead of sizeof(*prep_ctx); it only worked
	   because SHA256_CTX happens to be smaller, and wasted memory. */
	prep_ctx = mem_calloc(sizeof(*prep_ctx),
	                      self->params.max_keys_per_crypt);
#endif
}
static void done(void)
{
/* Free everything allocated in init(), matching its #ifdef branches. */
#ifdef SIMD_COEF_32
MEM_FREE(last_len);
MEM_FREE(NULL_LIMB);
MEM_FREE(crypt_cache);
#else
MEM_FREE(prep_ctx);
#endif
MEM_FREE(crypt_out);
MEM_FREE(prep_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
	/* Accept only the "0xc007" tag followed by exactly salt+digest
	   worth of hex digits. */
	if (strncmp(ciphertext, "0xc007", 6) != 0)
		return 0;
	return hexlen(&ciphertext[6]) == CIPHERTEXT_LENGTH - 6;
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	/* Canonicalize the hash to lower case in a static buffer. */
	static char out[CIPHERTEXT_LENGTH + 1];

	strnzcpy(out, ciphertext, sizeof(out));
	strlwr(out);
	return out;
}
static void *get_binary(char *ciphertext)
{
	/* Decode the BINARY_SIZE digest bytes that follow the tag and the
	   hex-encoded salt; result lives in a lazily allocated static. */
	static unsigned char *out;
	char *p = ciphertext + PREFIX_LENGTH + SALT_SIZE * 2;
	int i;

	if (!out)
		out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
	for (i = 0; i < BINARY_SIZE; i++, p += 2)
		out[i] = (atoi16[ARCH_INDEX(p[0])] << 4) | atoi16[ARCH_INDEX(p[1])];
	return out;
}
static void *get_salt(char *ciphertext)
{
	/* Decode the 8 salt bytes (16 hex digits right after the tag).
	   The union forces the alignment SALT_ALIGN promises. */
	static union {
		unsigned char u8[SALT_SIZE];
		ARCH_WORD_32 u32;
	} out;
	char *p = ciphertext + PREFIX_LENGTH;
	int i;

	for (i = 0; i < sizeof(out.u8); i++, p += 2)
		out.u8[i] = (atoi16[ARCH_INDEX(p[0])] << 4) | atoi16[ARCH_INDEX(p[1])];
	return out.u8;
}
/* Hash-table bucket selectors: each returns the first 32-bit word of the
computed digest masked to the corresponding table size. */
static int get_hash_0(int index)
{
return crypt_out[index][0] & PH_MASK_0;
}
static int get_hash_1(int index)
{
return crypt_out[index][0] & PH_MASK_1;
}
static int get_hash_2(int index)
{
return crypt_out[index][0] & PH_MASK_2;
}
static int get_hash_3(int index)
{
return crypt_out[index][0] & PH_MASK_3;
}
static int get_hash_4(int index)
{
return crypt_out[index][0] & PH_MASK_4;
}
static int get_hash_5(int index)
{
return crypt_out[index][0] & PH_MASK_5;
}
static int get_hash_6(int index)
{
return crypt_out[index][0] & PH_MASK_6;
}
/* Copy the 8-byte salt into every prepared key buffer at byte offset 510
of the 518-byte (keytext||salt) message. In the SIMD layout, stored limb 2
is the 8th 64-byte SHA-256 block (limbs 2..6 of the real message are the
all-zero NULL_LIMB -- see crypt_all), so UTF16 index 31 of limb 2 is byte
510; the remaining 6 salt bytes start stored limb 3. */
static void set_salt(void *salt)
{
int index;
for(index = 0; index < kpc; index++)
{
/* append salt at offset 510 */
#ifdef SIMD_COEF_32
int idx1=index/MAX_KEYS_PER_CRYPT, idx2=index%MAX_KEYS_PER_CRYPT;
memcpy(&prep_key[idx1][2][idx2][31], salt, 2);
memcpy(prep_key[idx1][3][idx2], &((unsigned char*)salt)[2], 6);
#else
memcpy((unsigned char*)prep_key[index] + 510,
(unsigned char*)salt, 8);
#endif
}
}
/* Store a candidate key as UTF-16BE in the prepared message buffer and
mark the batch dirty so crypt_all() recomputes its cached state. */
static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_32
UTF16 tmp[PLAINTEXT_LENGTH+1];
/* enc_to_utf16_be returns a negative value on truncation/invalid input;
strlen16 then recovers the length actually converted */
int len2, len = enc_to_utf16_be(tmp, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
int idx1=index/MAX_KEYS_PER_CRYPT, idx2=index%MAX_KEYS_PER_CRYPT;
if (len < 0)
len = strlen16(tmp);
/* split the UTF-16 key across limbs 0 and 1 (32 UTF16 units each) */
if (len > 32)
memcpy(prep_key[idx1][1][idx2], &tmp[32], (len-32)<<1);
len2 = len;
if (len2 > 32) len2 = 32;
memcpy(prep_key[idx1][0][idx2], tmp, len2<<1);
len2 = len;
/* zero the tail left behind by a longer previous key in this slot */
while (len < last_len[index]) {
if (len < 32)
prep_key[idx1][0][idx2][len] = 0;
else
prep_key[idx1][1][idx2][len-32] = 0;
++len;
}
last_len[index] = len2;
#else
/* Clean slate */
memset(prep_key[index], 0, 2 * PLAINTEXT_LENGTH);
/* convert key to UTF-16BE, --encoding aware */
enc_to_utf16_be(prep_key[index], PLAINTEXT_LENGTH, (UTF8*)key,
strlen(key));
#endif
dirty = 1;
}
/* Reconstruct the stored candidate: convert the buffered UTF-16BE key
back to UTF-16LE, then to the configured output encoding. */
static char *get_key(int index)
{
UTF16 key_le[PLAINTEXT_LENGTH + 1];
#ifdef SIMD_COEF_32
int j, idx1=index/MAX_KEYS_PER_CRYPT, idx2=index%MAX_KEYS_PER_CRYPT;
/* gather from limb 0 and, for keys longer than 32 units, limb 1;
JOHNSWAP(...)>>16 byte-swaps one UTF16 unit back to LE */
if (last_len[index] < 32) {
for (j = 0; j < last_len[index]; ++j)
key_le[j] = JOHNSWAP(prep_key[idx1][0][idx2][j])>>16;
} else {
for (j = 0; j < 32; ++j)
key_le[j] = JOHNSWAP(prep_key[idx1][0][idx2][j])>>16;
for (; j < last_len[index]; ++j)
key_le[j] = JOHNSWAP(prep_key[idx1][1][idx2][j-32])>>16;
}
key_le[j] = 0;
#else
UTF16 *d = key_le;
UTF16 *s = prep_key[index];
// Byte-swap back to UTF-16LE
while ((*d++ = *s >> 8 | *s << 8))
s++;
#endif
return (char*)utf16_to_enc(key_le);
}
/* Compute SHA256(keytext[510] || salt[8]) for every candidate.  When keys
are unchanged since the last call (dirty == 0), the hash state over the
key-dependent prefix is reused: the scalar path caches a SHA256_CTX over
the first 510 bytes; the SIMD path caches the state after the first seven
64-byte blocks (stored limbs 0 and 1 plus five all-zero NULL_LIMBs) and
then processes the two salt/padding limbs. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#ifndef SIMD_COEF_32
#pragma omp parallel for default(none) private(index) shared(dirty, prep_ctx, count, crypt_out, prep_key)
#else
#pragma omp parallel for default(none) private(index) shared(dirty, count, crypt_cache, crypt_out, prep_key, NULL_LIMB)
#endif
#endif
for(index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
#ifndef SIMD_COEF_32
SHA256_CTX ctx;
if (dirty) {
SHA256_Init(&prep_ctx[index]);
SHA256_Update(&prep_ctx[index], prep_key[index], 510);
}
memcpy(&ctx, &prep_ctx[index], sizeof(ctx));
SHA256_Update(&ctx, prep_key[index] + 510/2, 8);
SHA256_Final((unsigned char *)crypt_out[index], &ctx);
#else
unsigned char _OBuf[32*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *crypt;
uint32_t *crypt32;
crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_CACHE);
crypt32 = (uint32_t*)crypt;
if (dirty) {
SIMDSHA256body(prep_key[index/MAX_KEYS_PER_CRYPT], crypt_cache[index], NULL, SSEi_FLAT_IN|SSEi_FLAT_RELOAD_SWAPLAST);
SIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][1]), crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_RELOAD_SWAPLAST);
SIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);
SIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);
SIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);
SIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);
SIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);
}
memcpy(crypt32, crypt_cache[index], 32*MAX_KEYS_PER_CRYPT);
SIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][2]), crypt32, crypt32, SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_RELOAD_SWAPLAST);
// Last one with FLAT_OUT
SIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][3]), crypt_out[index], crypt32, SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_OUT);
#endif
}
dirty = 0;
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (index = 0; index < count; index++)
if (*(ARCH_WORD_32 *)binary == *(ARCH_WORD_32 *)crypt_out[index])
return 1;
return 0;
}
/*
 * cmp_one: full BINARY_SIZE comparison of the stored binary against the
 * hash computed for this index.
 */
static int cmp_one(void *binary, int index)
{
	if (memcmp(binary, crypt_out[index], BINARY_SIZE) != 0)
		return 0;
	return 1;
}
/*
 * cmp_exact: nothing further to verify — cmp_one() already compared the
 * full binary, so every candidate reaching this point is an exact match.
 */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static int salt_hash(void *salt)
{
return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1);
}
/*
 * John the Ripper format descriptor: wires the salt/key handlers and hash
 * methods defined above into the cracker core.
 */
struct fmt_main fmt_SybaseASE = {
	{ /* format parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		SybaseASE_tests
	}, { /* methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{ /* binary_hash table lookups */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ /* get_hash table lookups */
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
deramp.c | #include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))
static PyObject *deramp(PyObject *self, PyObject *args, PyObject *keywds);

/*
 * deramp(rampparams, x[, etc]): evaluate the ramp model
 *     y = goal + pm * exp(-a*x + b)
 * where a, b and goal are obtained by rotating the fitted parameters
 * (g, r0, r1) through the angles th0/th1 and offsetting by the best-fit
 * values (gb, r0b, r1b) stored in rampparams[6..8].
 *
 * Returns a new 1-D double ndarray with the same length as x, or NULL
 * with a Python exception set on failure.
 */
static PyObject *deramp(PyObject *self, PyObject *args, PyObject *keywds)
{
	PyObject *etc;
	PyArrayObject *x, *y, *rampparams;
	double g, r0, r1, th0, th1, pm, goal, a, b, gb, r0b, r1b;
	int i;
	npy_intp dims[1];

	static char *kwlist[] = {"rampparams", "x", "etc", NULL};
	if (!PyArg_ParseTupleAndKeywords(args, keywds, "OO|O", kwlist,
	                                 &rampparams, &x, &etc))
		return NULL;

	g   = IND(rampparams, 0);
	r0  = IND(rampparams, 1);
	r1  = IND(rampparams, 2);
	th0 = IND(rampparams, 3); // Angle b/w r0 & r1
	th1 = IND(rampparams, 4); // Angle b/w r0 & g
	pm  = IND(rampparams, 5);
	gb  = IND(rampparams, 6); // Best-fit value
	r0b = IND(rampparams, 7); // Best-fit value
	r1b = IND(rampparams, 8); // Best-fit value

	/* Rotation of (g, r0, r1). BUG FIX: the r1 term previously read
	 * "- r1*cos(th1)+sin(th0)", i.e. '+' where the rotation needs '*';
	 * compare the structurally identical 'goal' expression below. */
	a    =  r0*cos(th1)*cos(th0) - r1*cos(th1)*sin(th0) + g*sin(th1) + r0b;
	b    =  r0*sin(th0) + r1*cos(th0) + r1b;
	goal = -r0*sin(th1)*cos(th0) + r1*sin(th1)*sin(th0) + g*cos(th1) + gb;

	dims[0] = x->dimensions[0];
	y = (PyArrayObject *) PyArray_SimpleNew(1, dims, PyArray_DOUBLE);
	if (y == NULL)
		return NULL; /* propagate allocation failure instead of crashing */

#pragma omp parallel for
	for (i = 0; i < dims[0]; i++)
	{
		IND(y, i) = goal + pm*exp(-a*IND(x, i) + b);
	}
	return PyArray_Return(y);
}
/*
 * Docstring exposed to Python for the deramp() entry point.
 * NOTE(review): the parameter list below (goal, m, x0) does not match the
 * rampparams layout actually read by deramp() (g, r0, r1, th0, th1, pm,
 * gb, r0b, r1b) — it looks copied from a sibling model. The runtime string
 * is left untouched here; confirm and update the text separately.
 */
static char module_docstring[]="\
This function creates a model that fits a ramp using a rising exponential.\n\
\n\
Parameters\n\
----------\n\
goal: goal as x -> inf\n\
m: rise exp\n\
x0: time offset\n\
x: Array of time/phase points\n\
\n\
Returns\n\
-------\n\
This function returns an array of y values by combining an eclipse and a rising exponential\n\
\n\
Revisions\n\
---------\n\
2008-06-24 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\
Original version\n\n\
2010-12-24 Nate Lust, UCF \n\
natelust at linux dot com\n\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated C extensions to python3, with support for python2.7\n\n\
";
/* Method table: deramp is the module's only entry point (keywords allowed). */
static PyMethodDef module_methods[] = {
{"deramp",(PyCFunction)deramp,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};
// static char module_docstring[] =
// "This module is used to calcuate the deramp";
/*
 * Module initialisation: Python 3 builds export PyInit_deramp() through a
 * PyModuleDef; Python 2 builds export initderamp() through Py_InitModule3.
 * import_array() must run before any numpy C-API call is made.
 */
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_deramp(void)
#else
initderamp(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
	PyObject *module;
	static struct PyModuleDef moduledef = {
		PyModuleDef_HEAD_INIT,
		"deramp",             /* m_name */
		module_docstring,     /* m_doc */
		-1,                   /* m_size */
		module_methods,       /* m_methods */
		NULL,                 /* m_reload */
		NULL,                 /* m_traverse */
		NULL,                 /* m_clear */
		NULL,                 /* m_free */
	};
#endif
#if PY_MAJOR_VERSION >= 3
	module = PyModule_Create(&moduledef);
	if (!module)
		return NULL;
	/* Load `numpy` functionality. */
	import_array();
	return module;
#else
	PyObject *m = Py_InitModule3("deramp", module_methods, module_docstring);
	if (m == NULL)
		return;
	/* Load `numpy` functionality. */
	import_array();
#endif
}
|
SparseDenseProduct.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at the mozilla.org home page
#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H
namespace Eigen {
namespace internal {
template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
typename AlphaType,
int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;
// res += alpha * (row-major sparse lhs) * (dense rhs), one rhs column at a
// time. Each destination row is an independent sparse dot product, which is
// what makes the per-row OpenMP parallelisation below race-free.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  typedef evaluator<Lhs> LhsEval;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    LhsEval lhsEval(lhs);
    Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
    Eigen::initParallel();
    Index threads = Eigen::nbThreads();
#endif
    for(Index c=0; c<rhs.cols(); ++c)
    {
#ifdef EIGEN_HAS_OPENMP
      // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
      // It basically represents the minimal amount of work to be done to be worth it.
      if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
      {
        #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
      else
#endif
      {
        // sequential fallback (no OpenMP, or matrix deemed too small)
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
    }
  }
  // res(i,col) += alpha * dot(lhs.row(i), rhs.col(col))
  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
  {
    typename Res::Scalar tmp(0);
    for(LhsInnerIterator it(lhsEval,i); it ;++it)
      tmp += it.value() * rhs.coeff(it.index(),col);
    res.coeffRef(i,col) += alpha * tmp;
  }
};
// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> >
// {
// enum {
// Defined = 1
// };
// typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
// };
// res += alpha * (col-major sparse lhs) * (dense rhs), one rhs column at a
// time: for each rhs coefficient (j,c), scatter alpha*rhs(j,c) times the
// j-th sparse column of lhs into the result column c.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index c=0; c<rhs.cols(); ++c)
    {
      for(Index j=0; j<lhs.outerSize(); ++j)
      {
        // typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
        // compute alpha*rhs(j,c) once (with proper mixed-scalar promotion)
        // instead of once per nonzero of the column
        typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
        for(LhsInnerIterator it(lhsEval,j); it ;++it)
          res.coeffRef(it.index(),c) += it.value() * rhs_j;
      }
    }
  }
};
// res += alpha * (row-major sparse lhs) * (dense rhs), all rhs columns at
// once: each result row accumulates scaled whole rows of the rhs.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index j=0; j<lhs.outerSize(); ++j)
    {
      typename Res::RowXpr res_j(res.row(j));
      for(LhsInnerIterator it(lhsEval,j); it ;++it)
        res_j += (alpha*it.value()) * rhs.row(it.index());
    }
  }
};
// res += alpha * (col-major sparse lhs) * (dense rhs), all rhs columns at
// once: each nonzero of column j scatters a scaled copy of rhs.row(j) into
// the corresponding result row.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index j=0; j<lhs.outerSize(); ++j)
    {
      typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
      for(LhsInnerIterator it(lhsEval,j); it ;++it)
        res.row(it.index()) += (alpha*it.value()) * rhs_j;
    }
  }
};
// Entry point: dispatch to the sparse_time_dense_product_impl specialization
// selected by the lhs storage order and the rhs column layout (see the
// default template arguments of the primary declaration above).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}
} // end namespace internal
namespace internal {
// sparse * dense product: evaluate the nested operands if needed, then
// forward to sparse_time_dense_product which accumulates alpha*lhs*rhs
// into dst.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
 : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    // the nested_eval integer argument estimates how many times each operand
    // coefficient will be accessed, driving the evaluate-vs-reference choice
    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested;
    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 1 : Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
  }
};
// sparse-triangular * dense: reuse the generic sparse * dense kernel.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
 : generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};
// dense * sparse product: computed as the transposed sparse * dense product,
// i.e. dst^T += alpha * rhs^T * lhs^T, so the kernel above can be reused.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  template<typename Dst>
  static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested;
    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 1 : Lhs::RowsAtCompileTime>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    // transpose everything
    Transpose<Dst> dstT(dst);
    internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};
// dense * sparse-triangular: reuse the generic dense * sparse kernel.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
  : generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
{};
// Lazy evaluator for a sparse/dense outer product u*v^T. The sparse factor
// (or a sparse view of a dense one) is iterated; every inner iterator value
// is multiplied by a single coefficient m_factor taken from the other side.
// NeedToTranspose swaps the roles so both (sparse*dense^T) and
// (dense*sparse^T) map onto the same implementation.
template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
  typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
  typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
  typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;
  // if the actual left-hand side is a dense vector,
  // then build a sparse-view so that we can seamlessly iterate over it.
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1, SparseView<Lhs1> >::type ActualLhs;
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1 const&, SparseView<Lhs1> >::type LhsArg;
  typedef evaluator<ActualLhs> LhsEval;
  typedef evaluator<ActualRhs> RhsEval;
  typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
  typedef typename ProdXprType::Scalar Scalar;
public:
  enum {
    Flags = NeedToTranspose ? RowMajorBit : 0,
    CoeffReadCost = HugeCost
  };
  // Iterates the sparse factor once per outer index, scaling by the single
  // rhs coefficient that belongs to this outer slice.
  class InnerIterator : public LhsIterator
  {
  public:
    InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
      : LhsIterator(xprEval.m_lhsXprImpl, 0),
        m_outer(outer),
        m_empty(false),
        m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
    {}
    EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
    EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
    EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }
    EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
    EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }
  protected:
    // dense rhs: simply read the coefficient at 'outer'
    Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
    {
      return rhs.coeff(outer);
    }
    // sparse rhs: the slice may be structurally empty -- flag that so the
    // iterator immediately compares false
    Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
    {
      typename RhsEval::InnerIterator it(rhs, outer);
      if (it && it.index()==0 && it.value()!=Scalar(0))
        return it.value();
      m_empty = true;
      return Scalar(0);
    }
    Index m_outer;
    bool m_empty;
    Scalar m_factor;
  };
  sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }
  // transpose case
  sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }
protected:
  const LhsArg m_lhs;
  evaluator<ActualLhs> m_lhsXprImpl;
  evaluator<ActualRhs> m_rhsXprImpl;
};
// sparse * dense outer product
// sparse * dense outer product evaluator (sparse factor on the left).
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;
  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;
  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}
};
// dense * sparse outer product evaluator (sparse factor on the right).
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;
  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;
  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEDENSEPRODUCT_H
|
schedule.c | #include<stdio.h>
#include <math.h>
#include <omp.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#define ARRAY_SIZE 1024768
/*
 * Vector-addition benchmark: c[i] = a[i] + b[i] over n elements, timed
 * around an OpenMP static-schedule parallel loop.
 *
 * usage: ./schedule <n>
 */
int main(int argc, char *argv[]) {
	int i;
	int n;
	int *a, *b, *c;
	struct timeval tstart, tend;
	long elapsed_us;

	/* The original indexed argv[1] unconditionally and crashed without it. */
	if (argc < 2) {
		fprintf(stderr, "usage: %s <n>\n", argv[0]);
		return 1;
	}
	n = atoi(argv[1]);
	if (n <= 0) {
		fprintf(stderr, "n must be a positive integer\n");
		return 1;
	}
	a = (int *) malloc(sizeof(int) * n);
	b = (int *) malloc(sizeof(int) * n);
	c = (int *) malloc(sizeof(int) * n);
	if (!a || !b || !c) {
		fprintf(stderr, "out of memory\n");
		free(a); free(b); free(c);
		return 1;
	}
	/* Fill the inputs; the original summed uninitialized memory. */
	for (i = 0; i < n; ++i) {
		a[i] = i;
		b[i] = n - i;
	}
	gettimeofday(&tstart, NULL);
#pragma omp parallel for schedule(static)
	for (i = 0; i < n; ++i) {
		c[i] = a[i] + b[i];
	}
	gettimeofday(&tend, NULL);
	/* tv_sec/tv_usec are longs; print with %ld (was %d). */
	elapsed_us = (tend.tv_usec - tstart.tv_usec)
	           + (tend.tv_sec - tstart.tv_sec) * 1000000L;
	printf("Time taken is:%ld\n", elapsed_us);
	free(a);
	free(b);
	free(c);
	return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/*
 * timeval_subtract: store X - Y in RESULT.
 * Y is normalised in place while carrying microseconds into seconds.
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	long carry;

	/* Borrow whole seconds into y->tv_usec so x->tv_usec >= y->tv_usec. */
	if (x->tv_usec < y->tv_usec) {
		carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * carry;
		y->tv_sec += carry;
	}
	/* Push excess microseconds back when the gap exceeds one second. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}

	/* After normalisation the per-field differences are the answer,
	 * with tv_usec guaranteed non-negative. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;

	return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 8;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT. Note that Y is modified in place
 * while the microsecond carry is normalised.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Perform the carry for the later subtraction by updating y. */
	if (x->tv_usec < y->tv_usec)
	{
		int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * nsec;
		y->tv_sec += nsec;
	}
	/* Normalise in the other direction when the gap exceeds one second. */
	if (x->tv_usec - y->tv_usec > 1000000)
	{
		int nsec = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * nsec;
		y->tv_sec -= nsec;
	}
	/* Compute the time remaining to wait.
	 * tv_usec is certainly positive.
	 */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;
	/* Return 1 if result is negative. */
	return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 4;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_unop__identity_uint64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_fp64)
// op(A') function: GB (_unop_tran__identity_uint64_fp64)
// C type: uint64_t
// A type: double
// cast: uint64_t cij = GB_cast_to_uint64_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [0..anz-1] = (uint64_t) Ax [0..anz-1]: typecast-only "identity" apply.
GrB_Info GB (_unop_apply__identity_uint64_fp64)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint64_t) A': transpose with typecast; the actual kernel is the
// shared template GB_unop_transpose.c, driven by the GB_* macros above.
GrB_Info GB (_unop_tran__identity_uint64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
NLmean_propag2dirs_sspacing4_tspacing6_sim12_acc12_neighbor5_tau0100.c | /*
* compile: gcc -O3 -std=c99 -o [filename_out] -fopenmp [filename].c -lm -I/usr/include/netcdf-3/ -L/usr/lib64/ -lnetcdf -lnetcdf_c++
* in the terminal: export OMP_NUM_THREADS=3
*/
#include<stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <netcdf.h>
#include <omp.h>
/* This is the name of the data file we will read. */
#define FILENAME_RD "/data/PhDworks/isotropic/NLM/Udiff_spacespacing4.nc"
#define FILENAME_WR "/data/PhDworks/isotropic/NLM/NLmean_propag2dirs_sspacing4_tspacing6_sim12_acc12_neighbor5_tau0100.nc"
/* all constants */
#define N_HR 96
#define SCALE_FACTOR_SPACE 4
#define SCALE_FACTOR_TIME 6
#define SIM_HAFTSIZE 12
#define ACC_HAFTSIZE 12
#define NEIGHBOR_HAFTSIZE 5
#define SIM_FULLSIZE (2 * SIM_HAFTSIZE + 1)
#define ACC_FULLSIZE (2 * ACC_HAFTSIZE + 1)
#define NEIGHBOR_FULLSIZE (2 * NEIGHBOR_HAFTSIZE + 1)
#define TAU 0.1
#define NUM_VARS 1
#define NUM_SCALES 2
#define NUM_3DSNAPS 37 /* #3D snapshots */
#define NUM_BLOCKS N_HR/SCALE_FACTOR_TIME - 1 /* #(1:SCALE_FACTOR_TIME:N_HR) - 1*/
#define NUM_2DSNAPS (SCALE_FACTOR_TIME * NUM_BLOCKS + 1) /* #2D snapshots in each 3D block */
#define NDIMS 4
/* Handle errors by printing an error message and exiting with a non-zero status. */
#define ERRCODE 2
#define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);}
/* **********************************************************************************/
/* ****************************** USEFUL FUNCTIONS **********************************/
/* **********************************************************************************/
/*
 * get_onesnap: copy the inclusive slice arr1[id_start..id_end] into
 * arr2[0..id_end-id_start].
 */
void get_onesnap(double *arr1,double *arr2, int id_start, int id_end)
{
	int count = id_end - id_start + 1;
	for (int k = 0; k < count; k++)
		arr2[k] = arr1[id_start + k];
}
/*
 * put_onesnap: write arr2[0..id_end-id_start] into the inclusive slice
 * arr1[id_start..id_end] of the bigger array.
 */
void put_onesnap(double *arr1,double *arr2, int id_start, int id_end)
{
	int count = id_end - id_start + 1;
	for (int k = 0; k < count; k++)
		arr1[id_start + k] = arr2[k];
}
/*
 * norm_by_weight: element-wise division x[k] /= W[k] for k in [0, dim).
 */
void norm_by_weight(int dim, double *x, double *W)
{
	for (int i = 0; i < dim; ++i)
		x[i] /= W[i];
}
/* add_mat: element-wise sum, sum[k] = x1[k] + x2[k] for k in [0, dim). */
void add_mat(int dim, double *sum, double *x1, double *x2)
{
	int i = 0;
	while (i < dim) {
		sum[i] = x1[i] + x2[i];
		++i;
	}
}
/* initialize: fill x[0..dim-1] with the constant val. */
void initialize(int dim, double *x, double val)
{
	for (int i = dim; i-- > 0; )
		x[i] = val;
}
/* **********************************************************************************/
/* ****************************** NETCDF UTILS **************************************/
/* **********************************************************************************/
/*
 * create_netcdf: create the netcdf file [filename] containing [num_vars]
 * NC_FLOAT variables named [varname], each dimensioned (Nt, Nx, Ny, Nz)
 * with Nt as the unlimited record dimension.
 */
void create_netcdf(char *filename, int num_vars, char *varname[num_vars])
{
	int ncid_wr, retval_wr;
	int vel_varid_wr;
	int Nt, Nx, Ny, Nz;
	int dimids[NDIMS];

	/* Create the file. */
	if ((retval_wr = nc_create(filename, NC_CLOBBER, &ncid_wr)))
		ERR(retval_wr);
	/* Define the dimensions. The record dimension is defined to have
	 * unlimited length - it can grow as needed.
	 * BUG FIX: "Nx" was never defined, so dimids[1] carried an
	 * uninitialized dimension id into nc_def_var even though the
	 * variables are declared with NDIMS == 4. N_HR is assumed for its
	 * length, matching Ny/Nz -- confirm against the data layout. */
	if ((retval_wr = nc_def_dim(ncid_wr, "Nx", N_HR, &Nx)))
		ERR(retval_wr);
	if ((retval_wr = nc_def_dim(ncid_wr, "Ny", N_HR, &Ny)))
		ERR(retval_wr);
	if ((retval_wr = nc_def_dim(ncid_wr, "Nz", N_HR, &Nz)))
		ERR(retval_wr);
	if ((retval_wr = nc_def_dim(ncid_wr, "Nt", NC_UNLIMITED, &Nt)))
		ERR(retval_wr);
	/* Define the netCDF variables for the data. */
	dimids[0] = Nt;
	dimids[1] = Nx;
	dimids[2] = Ny;
	dimids[3] = Nz;
	for (int i = 0; i<num_vars; i++)
	{
		if ((retval_wr = nc_def_var(ncid_wr, varname[i], NC_FLOAT, NDIMS, dimids, &vel_varid_wr)))
			ERR(retval_wr);
	}
	/* End define mode (SHOULD NOT FORGET THIS!). */
	if ((retval_wr = nc_enddef(ncid_wr)))
		ERR(retval_wr);
	/* Close the file. */
	if ((retval_wr = nc_close(ncid_wr)))
		ERR(retval_wr);
	printf("\n *** SUCCESS creating file: %s!\n", filename);
}
/*
* write_netcdf:
* write into [filename], variable [varname] [snap_end - snap_start + 1 ] snapshots [snaps] started at [snap_start]
*/
/*
 * write_netcdf: write a hyperslab of variable [varname] into [filename];
 * [start]/[count] give the per-dimension offset and extent of the [snaps]
 * data being written. Exits via ERR() on any netCDF error.
 */
void write_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
    int ncid_wr, retval_wr;
    int vel_varid_wr;
    /* Open the existing file for writing. */
    if ((retval_wr = nc_open(filename, NC_WRITE, &ncid_wr)))
        ERR(retval_wr);
    /* Look up the id of the target variable. */
    if ((retval_wr = nc_inq_varid(ncid_wr, varname, &vel_varid_wr)))
        ERR(retval_wr);
    /* Write the requested hyperslab. */
    if ((retval_wr = nc_put_vara_double(ncid_wr, vel_varid_wr, start, count, snaps)))
        ERR(retval_wr);
    /* Close the file. */
    if ((retval_wr = nc_close(ncid_wr)))
        ERR(retval_wr);
    printf("\n *** SUCCESS writing variables \"%s\" to \"%s\"!\n", varname, filename);
}
/*
* read_netcdf: read from [filename], variable [varname] [snap_end - snap_start + 1 ] snapshots [snaps]
* started at [snap_start]
*/
/*
 * read_netcdf: read a hyperslab of variable [varname] from [filename] into
 * [snaps]; [start]/[count] give the per-dimension offset and extent.
 * Exits via ERR() on any netCDF error.
 */
void read_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
    int ncid_rd, retval_rd;
    int vel_varid_rd;
    /* Open the file. NC_NOWRITE requests read-only access. */
    if ((retval_rd = nc_open(filename, NC_NOWRITE, &ncid_rd)))
        ERR(retval_rd);
    /* Look up the id of the requested variable. */
    if ((retval_rd = nc_inq_varid(ncid_rd, varname, &vel_varid_rd)))
        ERR(retval_rd);
    /* Bulk read of the requested hyperslab. */
    if ((retval_rd = nc_get_vara_double(ncid_rd, vel_varid_rd, start, count, snaps)))
        ERR(retval_rd);
    /* Close the file, freeing all resources. */
    if ((retval_rd = nc_close(ncid_rd)))
        ERR(retval_rd);
    printf("\n *** SUCCESS reading variables \"%s\" from \"%s\" \n", varname, filename);
}
/* **********************************************************************************/
/* ****************************** ESTIMATE_DISTANCE *********************************/
/* **********************************************************************************/
/*
* generate_grids: precompute, for every estimation point of the N_HR x N_HR
* plane, every neighbourhood-window offset and every similarity-patch offset,
* the periodically wrapped y/z grid indices (gridpatches_y/z), together with
* the indices of the central accumulation sub-patch within a similarity patch
* (acc_ids).
* (The comment previously here described "estimate_distance" and was a stale
* copy-paste; it did not match the function that follows.)
*/
/*
 * generate_grids: precompute, for every estimation point (i, j) of the
 * N_HR x N_HR plane, every neighbourhood-window offset, and every
 * similarity-patch offset, the periodically wrapped y/z grid indices.
 *
 * gridpatches_y, gridpatches_z: flat output arrays of size
 *     N_HR * N_HR * NEIGHBOR_FULLSIZE^2 * SIM_FULLSIZE^2.
 * acc_ids: output, the ACC_FULLSIZE^2 flat indices (within one similarity
 *     patch) of the centred accumulation sub-patch.
 */
void generate_grids(int *gridpatches_y, int *gridpatches_z, int * acc_ids)
{
    /* Offsets of each point of the neighbourhood search window, centred on 0.
     * (Removed two unused locals neighbor_id/sim_id that were shadowed by the
     * loop variables below.) */
    int gridyoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], gridzoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE];
    for (int m = 0; m < NEIGHBOR_FULLSIZE; m++)
    {
        for (int n = 0; n < NEIGHBOR_FULLSIZE; n++)
        {
            gridyoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = m - NEIGHBOR_HAFTSIZE;
            gridzoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = n - NEIGHBOR_HAFTSIZE;
        }
    }
    /* Offsets of each point of the similarity patch, centred on 0. */
    int gridyoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE], gridzoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE];
    for (int p = 0; p < SIM_FULLSIZE; p++)
    {
        for (int q = 0; q < SIM_FULLSIZE; q++)
        {
            gridyoffset_sim[p * SIM_FULLSIZE + q] = p - SIM_HAFTSIZE;
            gridzoffset_sim[p * SIM_FULLSIZE + q] = q - SIM_HAFTSIZE;
        }
    }
    /* grid_sim maps patch coordinates (p, q) to the flat patch index. */
    int grid_sim[SIM_FULLSIZE][SIM_FULLSIZE];
    for (int p = 0; p < SIM_FULLSIZE; p++)
        for (int q = 0; q < SIM_FULLSIZE; q++)
            grid_sim[p][q] = p * SIM_FULLSIZE + q;
    /* acc_ids: flat patch indices of the centred accumulation sub-patch. */
    for (int p = 0; p < ACC_FULLSIZE; p++)
        for (int q = 0; q < ACC_FULLSIZE; q++)
            acc_ids[p * ACC_FULLSIZE + q] = grid_sim[SIM_HAFTSIZE - ACC_HAFTSIZE + p][SIM_HAFTSIZE - ACC_HAFTSIZE + q];
    int valy, valz;
    long int grid_id;
    for (int i = 0; i < N_HR; i++)
    {
        for (int j = 0; j < N_HR; j++)
        {
            for (int neighbor_id = 0; neighbor_id < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; neighbor_id++)
            {
                for (int sim_id = 0; sim_id < SIM_FULLSIZE * SIM_FULLSIZE; sim_id++)
                {
                    /* BUG FIX: compute the flat index in long arithmetic; the
                     * previous int expression could overflow before being
                     * assigned to the long variable for large windows/N_HR. */
                    grid_id = (long int) i * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                              + (long int) j * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                              + neighbor_id * SIM_FULLSIZE * SIM_FULLSIZE + sim_id;
                    valy = i + gridyoffset_neighbor[neighbor_id] + gridyoffset_sim[sim_id];
                    valz = j + gridzoffset_neighbor[neighbor_id] + gridzoffset_sim[sim_id];
                    /* Periodic wrap. NOTE(review): the period used is N_HR - 1,
                     * i.e. columns 0 and N_HR-1 are treated as the same physical
                     * point — confirm the grid duplicates its periodic endpoint. */
                    if (valy < 0)
                        gridpatches_y[grid_id] = (N_HR - 1) + valy;
                    else if (valy > (N_HR - 1))
                        gridpatches_y[grid_id] = valy - (N_HR - 1);
                    else
                        gridpatches_y[grid_id] = valy;
                    if (valz < 0)
                        gridpatches_z[grid_id] = (N_HR - 1) + valz;
                    else if (valz > (N_HR - 1))
                        gridpatches_z[grid_id] = valz - (N_HR - 1);
                    else
                        gridpatches_z[grid_id] = valz;
                }
            }
        }
    }
}
/* **********************************************************************************/
/* ****************************** NLMEAN *********************************/
/* **********************************************************************************/
/*
* NLmean: non-local-means propagation. For each estimation point, patch
* distances between the low-frequency reference plane (x_ref) and the moving
* low-frequency plane (x_moving) are converted to similarity weights, which
* are used to accumulate the fusion plane (x_fusion) into x_NLM along with
* the weights themselves into weight_NLM (the caller normalizes afterwards).
* (The comment previously here described "estimate_distance" and was a stale
* copy-paste; it did not match the function that follows.)
*/
/*void fusion(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion,
int gridpatches_y[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int gridpatches_z[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int acc_ids[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], int est_idy, int est_idz)*/
/*
 * NLmean: one non-local-means propagation pass.
 * For every estimation point (est_idy, est_idz) and every candidate centre ni
 * in the search neighbourhood:
 *   1. compute the mean squared distance d between the low-frequency reference
 *      patch (neighbour index ri = the window centre) and the candidate patch
 *      in x_moving;
 *   2. convert d to a similarity weight w = exp(-d / (2 TAU^2));
 *   3. accumulate w * x_fusion over the central accumulation sub-patch into
 *      x_NLM, and w into weight_NLM (the caller normalizes afterwards).
 * gridy/gridz are the precomputed wrapped indices from generate_grids();
 * accids selects the accumulation sub-patch within a similarity patch.
 *
 * NOTE(review): iterations of the omp-parallel est_idy loop can accumulate
 * into the same x_NLM/weight_NLM entries without atomics (accumulation
 * patches of nearby estimation points overlap) — confirm this potential data
 * race is acceptable for the intended configuration.
 */
void NLmean(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion, int *gridy, int *gridz, int *accids)
{
/* 1 / (#points per similarity patch): normalizes the patch distance. */
double norm_fact = 1.0/((double) (SIM_FULLSIZE * SIM_FULLSIZE));
/* ri: flat neighbour index of the window centre (the reference patch). */
int ri = NEIGHBOR_HAFTSIZE * NEIGHBOR_FULLSIZE + NEIGHBOR_HAFTSIZE;
int est_idy;
#pragma omp parallel for private (est_idy)
for (est_idy = 0; est_idy < N_HR; est_idy++)
for (int est_idz = 0; est_idz < N_HR; est_idz++)
for (int ni = 0; ni < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; ni++)
{
int ref_idy, ref_idz, moving_idy, moving_idz;
double du;
double d = 0.0;
long int grid_rid, grid_nid;
/* Pass 1: patch distance between reference (ri) and candidate (ni). */
for (int si = 0; si < SIM_FULLSIZE * SIM_FULLSIZE; si++)
{
grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + si ;
grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + si;
ref_idy = gridy[grid_rid];
moving_idy = gridy[grid_nid];
ref_idz = gridz[grid_rid];
moving_idz = gridz[grid_nid];
//compute distance btw reference patch and fusion patch
du = x_ref[ref_idy * N_HR + ref_idz] - x_moving[moving_idy * N_HR + moving_idz];
d = d + norm_fact*du*du;
}
/* Similarity weight: Gaussian kernel on patch distance, bandwidth TAU. */
double w = exp(-d/(2.0*TAU*TAU));
/* Pass 2: weighted accumulation over the central ACC sub-patch only. */
for(int k = 0; k < ACC_FULLSIZE * ACC_FULLSIZE; k++)
{
int ai = accids[k];
grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + ai ;
grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + ai;
ref_idy = gridy[grid_rid];
moving_idy = gridy[grid_nid];
ref_idz = gridz[grid_rid];
moving_idz = gridz[grid_nid];
x_NLM[ref_idy * N_HR + ref_idz] = x_NLM[ref_idy * N_HR + ref_idz] + w*x_fusion[moving_idy * N_HR + moving_idz];
weight_NLM[ref_idy * N_HR + ref_idz] = weight_NLM[ref_idy * N_HR + ref_idz] + w;
}
//printf("\n w=%f\n ",w);
}
}
/*
 * propag_forward: reconstruct planes t_first+1 .. t_bound1 in increasing time
 * order; each plane receives high-frequency content propagated from the plane
 * just before it (already reconstructed in Xrec).
 */
void propag_forward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_bound1, int t_offset)
{
    double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR];
    const int plane = N_HR * N_HR;
    for (int t_est = t_first + 1; t_est <= t_bound1; t_est++)
    {
        int s_est  = t_offset + t_est * plane;        /* first sample of plane t_est */
        int s_prev = t_offset + (t_est - 1) * plane;  /* first sample of the previous plane */
        /* Reference LF plane and the previous LF/HF planes. */
        get_onesnap(Xlf, xref_lf, s_est, s_est + plane - 1);
        get_onesnap(Xlf, xmov_lf, s_prev, s_prev + plane - 1);
        get_onesnap(Xrec, xmov_hf, s_prev, s_prev + plane - 1);
        /* Accumulators start from zero. */
        initialize(plane, xref_hf, 0.0);
        initialize(plane, w, 0.0);
        /* Propagate from the previous plane. */
        NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        /* Normalize by the accumulated weights and store the result. */
        norm_by_weight(plane, xref_hf, w);
        put_onesnap(Xrec, xref_hf, s_est, s_est + plane - 1);
    }
}
/*
 * propag_backward: reconstruct planes t_last-1 down to t_bound2 in decreasing
 * time order; each plane receives high-frequency content propagated from the
 * plane just after it (already reconstructed in Xrec).
 */
void propag_backward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_last, int t_bound2, int t_offset)
{
    double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR];
    const int plane = N_HR * N_HR;
    for (int t_est = t_last - 1; t_est >= t_bound2; --t_est)
    {
        int s_est  = t_offset + t_est * plane;        /* first sample of plane t_est */
        int s_next = t_offset + (t_est + 1) * plane;  /* first sample of the next plane */
        /* Reference LF plane and the next LF/HF planes. */
        get_onesnap(Xlf, xref_lf, s_est, s_est + plane - 1);
        get_onesnap(Xlf, xmov_lf, s_next, s_next + plane - 1);
        get_onesnap(Xrec, xmov_hf, s_next, s_next + plane - 1);
        /* Accumulators start from zero. */
        initialize(plane, xref_hf, 0.0);
        initialize(plane, w, 0.0);
        /* Propagate from the plane after. */
        NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        /* Normalize by the accumulated weights and store the result. */
        norm_by_weight(plane, xref_hf, w);
        put_onesnap(Xrec, xref_hf, s_est, s_est + plane - 1);
    }
}
/*
 * propag_2planes: reconstruct the single plane t_mid from its two neighbours
 * t_mid-1 and t_mid+1 (both already reconstructed in Xrec), accumulating the
 * NLM contributions of both sides before normalizing.
 */
void propag_2planes(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_mid, int t_offset)
{
    double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR];
    const int plane = N_HR * N_HR;
    int s_mid   = t_offset + t_mid * plane;
    int s_prev  = t_offset + (t_mid - 1) * plane;
    int s_after = t_offset + (t_mid + 1) * plane;
    /* Zero the accumulators. */
    initialize(plane, xref_hf, 0.0);
    initialize(plane, w, 0.0);
    /* Low-frequency reference plane. */
    get_onesnap(Xlf, xref_lf, s_mid, s_mid + plane - 1);
    /* Accumulate the contribution of the previous plane... */
    get_onesnap(Xlf, xmov_lf, s_prev, s_prev + plane - 1);
    get_onesnap(Xrec, xmov_hf, s_prev, s_prev + plane - 1);
    NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
    /* ...and of the plane after. */
    get_onesnap(Xlf, xmov_lf, s_after, s_after + plane - 1);
    get_onesnap(Xrec, xmov_hf, s_after, s_after + plane - 1);
    NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
    /* Normalize and store the reconstructed plane. */
    norm_by_weight(plane, xref_hf, w);
    put_onesnap(Xrec, xref_hf, s_mid, s_mid + plane - 1);
}
/*
 * propag_towardcenter: reconstruct the interior planes of one time block
 * [t_first, t_first + SCALE_FACTOR_TIME] by marching symmetrically from both
 * measured end planes toward the block centre. At each step td, planes
 * t1 = t_first + td and t2 = t_first + SCALE_FACTOR_TIME - td are both
 * estimated from the two already-reconstructed planes just outside them
 * (t1 - 1 and t2 + 1). If SCALE_FACTOR_TIME is even, one centre plane
 * remains and is estimated last from both of its neighbours.
 */
void propag_towardcenter(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_offset)
{
double xref1_lf[N_HR * N_HR], xref2_lf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR];
double xref1_hf[N_HR * N_HR], w1[N_HR * N_HR], xref2_hf[N_HR * N_HR], w2[N_HR * N_HR];
/* tc = number of marching steps = ceil(SCALE_FACTOR_TIME / 2). */
int tc = (int)SCALE_FACTOR_TIME/2;
if (SCALE_FACTOR_TIME % 2) { tc = (int)SCALE_FACTOR_TIME/2 + 1; }
for (int td = 1; td < tc; td++)
{
int t1 = t_first + td; // bound on left side
int t2 = t_first + SCALE_FACTOR_TIME - td; // bound on right side
// Initialize with zeros
initialize(N_HR * N_HR, xref1_hf, 0.0);
initialize(N_HR * N_HR, w1, 0.0);
initialize(N_HR * N_HR, xref2_hf, 0.0);
initialize(N_HR * N_HR, w2, 0.0);
/* Low-frequency reference planes of both targets. */
get_onesnap(Xlf, xref1_lf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xref2_lf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
//Propagate from left bound
/* Plane t1-1 was reconstructed on a previous step (or is a measured plane). */
get_onesnap(Xlf, xmov_lf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
//Propagate from right bound
/* Plane t2+1 was reconstructed on a previous step (or is a measured plane). */
get_onesnap(Xlf, xmov_lf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
// Normalize and put back
norm_by_weight(N_HR*N_HR, xref1_hf, w1);
put_onesnap(Xrec, xref1_hf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
norm_by_weight(N_HR*N_HR, xref2_hf, w2);
put_onesnap(Xrec, xref2_hf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
}
// Last plane in the center
/* Even SCALE_FACTOR_TIME leaves one unestimated centre plane at t_first + tc;
 * estimate it from both of its already-reconstructed neighbours. */
if (SCALE_FACTOR_TIME % 2 == 0)
{
initialize(N_HR * N_HR, xref1_hf, 0.0);
initialize(N_HR * N_HR, w1, 0.0);
get_onesnap(Xlf, xref1_lf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
norm_by_weight(N_HR*N_HR, xref1_hf, w1);
put_onesnap(Xrec, xref1_hf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
}
}
/* **********************************************************************************/
/* ********************************** MAIN FUNCTION *********************************/
/* **********************************************************************************/
int main()
{
/* Creat the file to save results */
char *varnames[NUM_VARS] = {"x_rec_all"};
create_netcdf(FILENAME_WR, NUM_VARS, varnames);
/* Allocate memory */
double *x_fusion_lf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
double *x_fusion_hf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
double *x_rec_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
/* read all snapshots */
size_t start_ids[4] = {0, 0, 0, 0};
size_t count_ids[4] = {NUM_3DSNAPS, NUM_2DSNAPS, N_HR, N_HR };
read_netcdf(FILENAME_RD, "Uinterp_all", start_ids, count_ids, x_fusion_lf_all);
read_netcdf(FILENAME_RD, "Udiff_all", start_ids, count_ids, x_fusion_hf_all);
double time_all_start = omp_get_wtime();
double *x_current_lf = (double*)malloc(N_HR * N_HR * sizeof(double));
double *x_current_hf = (double*)malloc(N_HR * N_HR * sizeof(double));
double *x_rec = (double*)malloc(N_HR * N_HR * sizeof(double));
long int grid_size = N_HR * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE;
int *gridpatches_y = (int*)malloc(grid_size * sizeof(int));
int *gridpatches_z = (int*)malloc(grid_size * sizeof(int));
int *acc_ids = (int*)malloc(ACC_FULLSIZE * ACC_FULLSIZE * sizeof(int));
generate_grids(gridpatches_y, gridpatches_z, acc_ids);
for(int snap3d_id = 0; snap3d_id < NUM_3DSNAPS; snap3d_id++)
{
int t_offset = snap3d_id * NUM_2DSNAPS * N_HR*N_HR;
// put first PIV
get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
put_onesnap(x_rec_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
int block_id;
for(block_id = 0; block_id < NUM_BLOCKS; block_id++)
{
double time_start = omp_get_wtime();
int t_first = SCALE_FACTOR_TIME*block_id;
int t_last = SCALE_FACTOR_TIME*(block_id+1);
// Put last PIV of the block
get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
put_onesnap(x_rec_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
propag_towardcenter(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_first, t_offset);
printf("\n Estimated block %i (total 23) in 3D snapshot %i (total 37) in %f seconds \n", block_id, snap3d_id, (double)omp_get_wtime() - time_start);
}
}
// Write to file
write_netcdf(FILENAME_WR, "x_rec_all", start_ids, count_ids, x_rec_all);
/* free memory */
free(x_rec); free(x_current_lf); free(x_current_hf);
free(x_rec_all); free(x_fusion_lf_all); free(x_fusion_hf_all);
free(gridpatches_y); free(gridpatches_z); free(acc_ids);
printf("\n FINISH ALL COMPUTATION IN %f SECONDS \n", (double)omp_get_wtime() - time_all_start);
return 1;
}
|
GB_unop__identity_uint32_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_uint16)
// op(A') function: GB (_unop_tran__identity_uint32_uint16)
// C type: uint32_t
// A type: uint16_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY unary op with typecast: Cx [p] = (uint32_t) Ax [p].
// Handles both the non-bitmap case (all anz entries present) and the bitmap
// case (entries with Ab [p] == 0 are skipped; C->b was already copied).
GrB_Info GB (_unop_apply__identity_uint32_uint16)
(
uint32_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// sparse/hyper/full case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
uint32_t z = (uint32_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
uint32_t z = (uint32_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint32_t) A': transpose A while typecasting each entry. The actual
// work is done by the shared template GB_unop_transpose.c, driven by the
// GB_* macros defined above for this type pair.
GrB_Info GB (_unop_tran__identity_uint32_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__cimag_fp64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__cimag_fp64_fc64
// op(A') function: GB_unop_tran__cimag_fp64_fc64
// C type: double
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = cimag (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cimag (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = cimag (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CIMAG || GxB_NO_FP64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the CIMAG unary op to every entry: Cx [p] = cimag (Ax [p]) — the
// imaginary part of each double-complex entry, stored as double.
GrB_Info GB_unop_apply__cimag_fp64_fc64
(
double *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = (aij) ;
Cx [p] = cimag (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cimag (A'): transpose A, typecast, and apply the CIMAG op. The work is
// done by the shared template GB_unop_transpose.c (phase 2), driven by the
// GB_* macros defined above for this type pair.
GrB_Info GB_unop_tran__cimag_fp64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
GB_binop__isle_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint32)
// A*D function (colscale): GB (_AxD__isle_uint32)
// D*A function (rowscale): GB (_DxB__isle_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint32)
// C=scalar+B GB (_bind1st__isle_uint32)
// C=scalar+B' GB (_bind1st_tran__isle_uint32)
// C=A+scalar GB (_bind2nd__isle_uint32)
// C=A'+scalar GB (_bind2nd_tran__isle_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_UINT32 || GxB_NO_ISLE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISLE is not one of those accumulators, so this dense C += A+B kernel is
// disabled and the generator emitted the placeholder name "(none)".
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense: applies z = (x <= y) elementwise
// via the shared template GB_dense_ewise3_noaccum_template.c.
GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C with the
// ISLE_UINT32 operator, using the precomputed task slicing of B.
GrB_Info GB (_Cdense_accumB__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (passed type-erased via p_bwork) into the
// dense matrix C with the ISLE_UINT32 operator.
GrB_Info GB (_Cdense_accumb__isle_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns; harmless
// artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// ISLE_UINT32 results into C->x via GB_AxB_colscale_meta.c.
GrB_Info GB (_AxD__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing the
// ISLE_UINT32 results into C->x via GB_AxB_rowscale_meta.c.
GrB_Info GB (_DxB__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (set union of the patterns of A and B), optionally masked
// by M; all sparsity-format combinations are handled by GB_add_template.c,
// which uses the workspace declared here and freed by GB_FREE_WORK.
GrB_Info GB (_AaddB__isle_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (set intersection of the patterns of A and B), with an
// optional mask M; delegated to the shared template GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__isle_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full. The
// GB_BINOP_FLIP branch is resolved at compile time: it is 0 for ISLE (the
// flip was already handled by the caller), so only the non-flipped template
// instantiation below is compiled in.
GrB_Info GB (_AemultB_02__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult with a sparse/hyper mask M while A and B are both bitmap/full;
// the included template iterates over the entries of M.
GrB_Info GB (_AemultB_03__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B; the
// included template handles all mask/no-mask cases via ewise_method.
GrB_Info GB (_AemultB_bitmap__isle_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = (x <= bij) to every entry present in B, with the scalar x bound
// as the first operand. Bb is B's bitmap (NULL when B is full); entries
// absent from the bitmap are left untouched in Cx.
GrB_Info GB (_bind1st__isle_uint32)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    const uint32_t *Bx = (uint32_t *) Bx_input ;
    const uint32_t x = (*((uint32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Bb, k))
        {
            Cx [k] = (x <= Bx [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = (aij <= y) to every entry present in A, with the scalar y bound
// as the second operand. Ab is A's bitmap (NULL when A is full); entries
// absent from the bitmap are left untouched in Cx.
GrB_Info GB (_bind2nd__isle_uint32)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    const uint32_t *Ax = (uint32_t *) Ax_input ;
    const uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            Cx [k] = (Ax [k] <= y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name).
// GB_CAST_OP is consumed by GB_unop_transpose.c included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar x
// bound as the first argument.
GrB_Info GB (_bind1st_tran__isle_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name).
// GB_CAST_OP is consumed by GB_unop_transpose.c included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar y
// bound as the second argument. GB_ATYPE already matches A's type here,
// so no override is needed.
GrB_Info GB (_bind2nd_tran__isle_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
XT_HDFIO.c | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "hdf5.h"
#include <mpi.h>
#include "allocate.h"
#include "XT_Constants.h"
/*Instead of reading projection and weight data from binary files, use the function below to read those directly from
HDF files.*/
/* Reads and bins the projection and weight datasets from one HDF5 file.
 *
 * data_filename : HDF5 file holding both 3-D datasets
 * projections   : [out] proj_num*proj_rows*proj_cols beam-hardening-corrected
 *                 projection values (proj_rows here is the per-MPI-node count)
 * weights       : [out] matching accumulated weight values
 * datafile_row0 : first detector row of interest in the file
 * proj_rows, proj_cols : requested output grid; each output pixel averages a
 *                 ratio_t x ratio_r patch of the raw dataset
 * proj_start, proj_num : range of projection views to read
 * debug_file_ptr: log stream for progress and error messages
 *
 * Returns 0 on success, -1 on inconsistent dataset sizes.
 * NOTE(review): the two early "return(-1)" paths leak every HDF5 handle
 * opened above them (files, datasets, dataspaces) -- confirm and fix.
 */
int32_t read_ProjWeightData (char data_filename[], float *projections, float *weights, int32_t datafile_row0, int32_t proj_rows, int32_t proj_cols, int32_t proj_start, int32_t proj_num, FILE* debug_file_ptr)
{
hid_t projs_file_id, weights_file_id, weights_dataset, projs_dataset, weights_dataspace, projs_dataspace, weights_memspace, projs_memspace;
hsize_t weights_dims[3], projs_dims[3], data_offset[3], data_count[3], mem_offset[3];
herr_t status;
int32_t i, j, k, m, n, weights_rank, projs_rank, extras_r, true_length_r, ratio_r, ratio_t, idx, nodes_rank, nodes_num, slice_num;
float ***projs_img, temp, ***weights_img;
/* each MPI rank reads its own slab of detector rows */
MPI_Comm_size(MPI_COMM_WORLD, &nodes_num);
MPI_Comm_rank(MPI_COMM_WORLD, &nodes_rank);
/*HDF file pointers*/
/* the same file is opened twice so each dataset has its own file handle */
projs_file_id = H5Fopen(data_filename, H5F_ACC_RDONLY, H5P_DEFAULT);
weights_file_id = H5Fopen(data_filename, H5F_ACC_RDONLY, H5P_DEFAULT);
/*dataset pointers*/
weights_dataset = H5Dopen(weights_file_id, STD_WEIGHTS_DATASET_NAME, H5P_DEFAULT);
projs_dataset = H5Dopen(projs_file_id, STD_PROJECTIONS_DATASET_NAME, H5P_DEFAULT);
weights_dataspace = H5Dget_space (weights_dataset); /* dataspace handle */
projs_dataspace = H5Dget_space (projs_dataset); /* dataspace handle */
/*Gives the number of dimensions in a dataset*/
weights_rank = H5Sget_simple_extent_ndims (weights_dataspace);
projs_rank = H5Sget_simple_extent_ndims (projs_dataspace);
/* a wrong rank is only logged; execution continues regardless */
if (weights_rank != 3)
fprintf(debug_file_ptr, "ERROR: The rank of the dataset %s is not 3\n", STD_WEIGHTS_DATASET_NAME);
if (projs_rank != 3)
fprintf(debug_file_ptr, "ERROR: The rank of the dataset %s is not 3\n", STD_PROJECTIONS_DATASET_NAME);
/*finds the dimension of the dataset and stores them in dims_wd and dims_proj*/
status = H5Sget_simple_extent_dims (weights_dataspace, weights_dims, NULL);
status = H5Sget_simple_extent_dims (projs_dataspace, projs_dims, NULL);
fprintf(debug_file_ptr, "Size of weights (%s) dataset is %dx%dx%d\n", STD_WEIGHTS_DATASET_NAME, (int32_t)weights_dims[0], (int32_t)weights_dims[1], (int32_t)weights_dims[2]);
fprintf(debug_file_ptr, "Size of projections (%s) dataset is %dx%dx%d\n", STD_PROJECTIONS_DATASET_NAME, (int32_t)projs_dims[0], (int32_t)projs_dims[1], (int32_t)projs_dims[2]);
if (weights_dims[0] != projs_dims[0] || weights_dims[1] != projs_dims[1] || weights_dims[2] != projs_dims[2])
{
fprintf(debug_file_ptr, "ERROR: Dimensions of weights dataset and projection datasets don't match\n");
return(-1);
}
/* center-crop the detector columns so their count divides proj_cols evenly */
extras_r = projs_dims[2] % proj_cols;
true_length_r = projs_dims[2] - extras_r;
ratio_r = true_length_r/proj_cols;
/* the same sub-sampling factor is reused along the row (t) axis */
ratio_t = ratio_r;
/* detector rows are split evenly across the MPI ranks */
proj_rows = proj_rows/nodes_num;
slice_num = proj_rows*ratio_t;
data_offset[0] = proj_start;
data_offset[1] = datafile_row0 + nodes_rank*slice_num;
data_offset[2] = extras_r/2;
data_count[0] = proj_num;
data_count[1] = slice_num;
data_count[2] = true_length_r;
if (data_offset[0] + data_count[0] > projs_dims[0] || data_offset[1] + data_count[1] > projs_dims[1] || data_offset[2] + data_count[2] > projs_dims[2])
{
fprintf(debug_file_ptr, "ERROR: Dataset size is inconsistent with inputs\n");
return(-1);
}
fprintf(debug_file_ptr, "Data sub-sampling factor along x-axis = %d, data sub-sampling factor along z-axis = %d\n", ratio_r, ratio_t);
/* 3-D scratch buffers; assumed contiguous so &buf[0][0][0] can receive the
whole H5Dread -- TODO confirm against multialloc in allocate.h */
weights_img = (float***)multialloc(sizeof(float), 3, data_count[0], data_count[1], data_count[2]);
projs_img = (float***)multialloc(sizeof(float), 3, data_count[0], data_count[1], data_count[2]);
/*Selects ROI in the dataset which should be read into arrays*/
status = H5Sselect_hyperslab (weights_dataspace, H5S_SELECT_SET, data_offset, NULL, data_count, NULL);
status = H5Sselect_hyperslab (projs_dataspace, H5S_SELECT_SET, data_offset, NULL, data_count, NULL);
weights_memspace = H5Screate_simple (3, data_count, NULL);
projs_memspace = H5Screate_simple (3, data_count, NULL);
mem_offset[0] = 0; mem_offset[1] = 0; mem_offset[2] = 0;
status = H5Sselect_hyperslab (weights_memspace, H5S_SELECT_SET, mem_offset, NULL, data_count, NULL);
status = H5Sselect_hyperslab (projs_memspace, H5S_SELECT_SET, mem_offset, NULL, data_count, NULL);
fprintf(debug_file_ptr,"Reading HDF5 dataset ...\n");
status = H5Dread(weights_dataset, H5T_NATIVE_FLOAT, weights_memspace, weights_dataspace, H5P_DEFAULT, &(weights_img[0][0][0]));
fprintf(debug_file_ptr,"Read dataset %s\n", STD_WEIGHTS_DATASET_NAME);
status = H5Dread(projs_dataset, H5T_NATIVE_FLOAT, projs_memspace, projs_dataspace, H5P_DEFAULT, &(projs_img[0][0][0]));
fprintf(debug_file_ptr,"Read dataset %s\n", STD_PROJECTIONS_DATASET_NAME);
/* bin each ratio_t x ratio_r patch down to one output pixel; the quadratic
beam hardening correction is applied to the averaged projection value */
#pragma omp parallel for private(j, k, m, n, temp, idx)
for (i = 0; i < proj_num; i++)
{
for (j = 0; j < proj_rows; j++)
for (k = 0; k < proj_cols; k++)
{
temp = 0;
idx = i*proj_rows*proj_cols + j*proj_cols + k;
weights[idx] = 0;
for (m = 0; m < ratio_t; m++)
{
for (n = 0; n < ratio_r; n++)
{
temp += projs_img[i][j*ratio_t + m][k*ratio_r + n];
weights[idx] += weights_img[i][j*ratio_t + m][k*ratio_r + n];
}
}
temp = temp/(ratio_r*ratio_t);
/* beam hardening: p <- BH_QUAD_COEF*p^2 + p */
projections[idx] = BH_QUAD_COEF*temp*temp + temp;
}
}
fprintf(debug_file_ptr,"Generated projections and weight data with beamhardening coefficient of %f\n", (float)BH_QUAD_COEF);
/* if (TomoInputsPtr->Write2Tiff == 1)
{
dim[0] = 1; dim[1] = SinogramPtr->N_p; dim[2] = SinogramPtr->N_r; dim[3] = total_t_slices;
WriteMultiDimArray2Tiff (projs_filename, dim, 0, 3, 1, 2, &(Projection[0][0][0]), 0, TomoInputsPtr->debug_file_ptr);
WriteMultiDimArray2Tiff (weights_filename, dim, 0, 3, 1, 2, &(Weight[0][0][0]), 0, TomoInputsPtr->debug_file_ptr);
}
fprintf(TomoInputsPtr->debug_file_ptr,"gen_projection_4m_HDF: Wrote projections and weight data to binary files\n");
*/
/* release buffers and HDF5 handles in reverse order of acquisition */
multifree(weights_img, 3);
multifree(projs_img, 3);
H5Sclose(weights_memspace);
H5Sclose(projs_memspace);
H5Sclose(weights_dataspace);
H5Sclose(projs_dataspace);
H5Dclose(weights_dataset);
H5Dclose(projs_dataset);
H5Fclose(weights_file_id);
H5Fclose(projs_file_id);
return(0);
}
/* Reads the projection angle and projection time lists from the HDF5 file.
 *
 * data_filename : HDF5 file containing the angle and time datasets
 * proj_angles   : [out] proj_num projection angles, starting at proj_start
 * proj_times    : [out] proj_num projection times, starting at proj_start
 * proj_start    : index of the first projection to read
 * proj_num      : number of projections to read
 * debug_file_ptr: log stream for progress and error messages
 *
 * Returns 0 on success, -1 on a dataset size/consistency error.
 * Fix over the original: the early error returns used to leak every HDF5
 * handle opened before the failure; all handles are now released on every
 * exit path via a single cleanup block.
 */
int32_t read_AngleTimeReconList (char data_filename[], float *proj_angles, float *proj_times, /*float *recon_times,*/ int32_t proj_start, int32_t proj_num, /*int32_t recon_num,*/ FILE *debug_file_ptr)
{
	hid_t angles_file_id, times_file_id, angles_dataset, times_dataset;
	hid_t angles_dataspace, times_dataspace;
	hid_t angles_memspace = -1, times_memspace = -1; /* created late; -1 = not yet created */
	hsize_t angles_dims[1], times_dims[1], data_offset[1], data_count[1], mem_offset[1];
	herr_t status;
	int32_t angles_rank, times_rank;
	int32_t ret = 0;

	/* the same file is opened twice so each dataset keeps its own file handle */
	angles_file_id = H5Fopen(data_filename, H5F_ACC_RDONLY, H5P_DEFAULT);
	times_file_id = H5Fopen(data_filename, H5F_ACC_RDONLY, H5P_DEFAULT);
	angles_dataset = H5Dopen(angles_file_id, STD_PROJ_ANGLES_DATASET_NAME, H5P_DEFAULT);
	times_dataset = H5Dopen(times_file_id, STD_PROJ_TIMES_DATASET_NAME, H5P_DEFAULT);
	angles_dataspace = H5Dget_space (angles_dataset); /* dataspace handle */
	times_dataspace = H5Dget_space (times_dataset); /* dataspace handle */

	/* both lists must be one-dimensional; a wrong rank is only logged, as before */
	angles_rank = H5Sget_simple_extent_ndims (angles_dataspace);
	times_rank = H5Sget_simple_extent_ndims (times_dataspace);
	if (angles_rank != 1)
		fprintf(debug_file_ptr, "ERROR: The rank of the dataset %s is not 1\n", STD_PROJ_ANGLES_DATASET_NAME);
	if (times_rank != 1)
		fprintf(debug_file_ptr, "ERROR: The rank of the dataset %s is not 1\n", STD_PROJ_TIMES_DATASET_NAME);

	status = H5Sget_simple_extent_dims (angles_dataspace, angles_dims, NULL);
	status = H5Sget_simple_extent_dims (times_dataspace, times_dims, NULL);
	fprintf(debug_file_ptr, "Size of projection angles (%s) dataset is %d\n", STD_PROJ_ANGLES_DATASET_NAME, (int32_t)angles_dims[0]);
	fprintf(debug_file_ptr, "Size of projection times (%s) dataset is %d\n", STD_PROJ_TIMES_DATASET_NAME, (int32_t)times_dims[0]);
	if (times_dims[0] != angles_dims[0])
	{
		fprintf(debug_file_ptr, "ERROR: Size of angles and times list does not match\n");
		ret = -1;
		goto cleanup;
	}

	/* read proj_num entries starting at proj_start from both datasets */
	data_offset[0] = proj_start;
	data_count[0] = proj_num;
	if (data_offset[0] + data_count[0] > angles_dims[0])
	{
		fprintf(debug_file_ptr, "ERROR: Dataset size is inconsistent with inputs\n");
		ret = -1;
		goto cleanup;
	}
	status = H5Sselect_hyperslab (angles_dataspace, H5S_SELECT_SET, data_offset, NULL, data_count, NULL);
	status = H5Sselect_hyperslab (times_dataspace, H5S_SELECT_SET, data_offset, NULL, data_count, NULL);
	angles_memspace = H5Screate_simple (1, data_count, NULL);
	times_memspace = H5Screate_simple (1, data_count, NULL);
	mem_offset[0] = 0;
	status = H5Sselect_hyperslab (angles_memspace, H5S_SELECT_SET, mem_offset, NULL, data_count, NULL);
	status = H5Sselect_hyperslab (times_memspace, H5S_SELECT_SET, mem_offset, NULL, data_count, NULL);
	status = H5Dread(angles_dataset, H5T_NATIVE_FLOAT, angles_memspace, angles_dataspace, H5P_DEFAULT, &(proj_angles[0]));
	fprintf(debug_file_ptr, "Read dataset %s\n", STD_PROJ_ANGLES_DATASET_NAME);
	status = H5Dread(times_dataset, H5T_NATIVE_FLOAT, times_memspace, times_dataspace, H5P_DEFAULT, &(proj_times[0]));
	fprintf(debug_file_ptr, "Read dataset %s\n", STD_PROJ_TIMES_DATASET_NAME);

cleanup:
	/* release every handle that was opened/created, in reverse order */
	if (angles_memspace >= 0) H5Sclose(angles_memspace);
	if (times_memspace >= 0) H5Sclose(times_memspace);
	H5Sclose(angles_dataspace);
	H5Sclose(times_dataspace);
	H5Dclose(angles_dataset);
	H5Dclose(times_dataset);
	H5Fclose(angles_file_id);
	H5Fclose(times_file_id);
	(void) status; /* HDF5 call statuses are intentionally unchecked, as before */
	return (ret);
}
|
diagmv_x_dia_u.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/* OpenMP kernel: computes y[i] = beta*y[i] + alpha*x[i] for every row i.
 * The DIA matrix A contributes only its row count here -- no stored
 * diagonals are read, i.e. the matrix acts as an identity (presumably the
 * unit-diagonal variant this generated file implements -- confirm against
 * the kernel naming convention). Iterations are independent, so the loop
 * is parallelized without synchronization. */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_DIA *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
/* y[i] = beta*y[i], then y[i] += alpha*x[i] */
alpha_mul(y[i], beta, y[i]);
alpha_madde(y[i], alpha, x[i]);
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point (name expanded from the ONAME macro at build time):
 * dispatches directly to the OpenMP implementation above. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_DIA *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
return ONAME_omp(alpha, A, x, beta, y);
}
|
GB_binop__eq_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__eq_int64
// A.*B function (eWiseMult): GB_AemultB__eq_int64
// A*D function (colscale): GB_AxD__eq_int64
// D*A function (rowscale): GB_DxB__eq_int64
// C+=B function (dense accum): GB_Cdense_accumB__eq_int64
// C+=b function (dense accum): GB_Cdense_accumb__eq_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_int64
// C=scalar+B GB_bind1st__eq_int64
// C=scalar+B' GB_bind1st_tran__eq_int64
// C=A+scalar GB_bind2nd__eq_int64
// C=A'+scalar GB_bind2nd_tran__eq_int64
// C type: bool
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_INT64 || GxB_NO_EQ_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the EQ_INT64 operator
// (cij = (aij == bij)) is applied entrywise by the included template.
GrB_Info GB_Cdense_ewise3_noaccum__eq_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix into a dense one. For this operator
// the template is compiled out (#if 0), so the kernel is a stub that simply
// reports success; the generic path handles the accumulation instead.
GrB_Info GB_Cdense_accumB__eq_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense matrix. As with accumB above,
// the template is compiled out (#if 0) for this operator, so this kernel is
// a stub that reports success.
GrB_Info GB_Cdense_accumb__eq_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: combine each column of A with the matching diagonal entry of D,
// writing the bool results of (aij == djj) into C->x via the template.
GrB_Info GB_AxD__eq_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has type bool (the result type of the EQ comparison)
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: combine each row of B with the matching diagonal entry of D,
// writing the bool results of (dii == bij) into C->x via the template.
GrB_Info GB_DxB__eq_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has type bool (the result type of the EQ comparison)
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with cij = (aij == bij), driven by the
// pre-sliced task list; the computation lives in the included template.
GrB_Info GB_AaddB__eq_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with cij = (aij == bij), driven by the
// pre-sliced task list; the computation lives in the included template.
GrB_Info GB_AemultB__eq_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply Cx [p] = (x == Bx [p]) for all p, with the scalar x bound as the
// first operand of the EQ_INT64 operator.
GrB_Info GB_bind1st__eq_int64
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    const int64_t *Bx = (int64_t *) Bx_input ;
    const int64_t x = (*((int64_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (x == Bx [k]) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply Cx [p] = (Ax [p] == y) for all p, with the scalar y bound as the
// second operand of the EQ_INT64 operator.
GrB_Info GB_bind2nd__eq_int64
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    const int64_t *Ax = (int64_t *) Ax_input ;
    const int64_t y = (*((int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (Ax [k] == y) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name).
// GB_CAST_OP is consumed by GB_unop_transpose.c included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar x
// bound as the first argument.
GrB_Info GB_bind1st_tran__eq_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name).
// GB_CAST_OP is consumed by GB_unop_transpose.c included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar y
// bound as the second argument. GB_ATYPE already matches A's type here,
// so no override is needed.
GrB_Info GB_bind2nd_tran__eq_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
smooth.multi.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <R.h>
#include <Rmath.h>
#include <Rinternals.h>
#if defined _OPENMP
#include <omp.h>
#endif
// find the maximum value
/* Return the largest element of A[0..n-1]; -INFINITY when n == 0. */
static inline double maxDouble( double * A, const size_t n) {
    double best = -INFINITY;
    for (size_t idx = 0; idx < n; idx++) {
        if (A[idx] > best) best = A[idx];
    }
    return best;
}
// find the minimum value
/* Return the smallest element of A[0..n-1]; +INFINITY when n == 0. */
static inline double minDouble( double * A, const size_t n) {
    double best = INFINITY;
    for (size_t idx = 0; idx < n; idx++) {
        if (A[idx] < best) best = A[idx];
    }
    return best;
}
/* swap function */
/* Exchange elements b and c of A (no-op when b == c). */
static inline void swap( double * A, const size_t b, const size_t c) {
    const double held = A[c];
    A[c] = A[b];
    A[b] = held;
}
/* partition function */
/* Lomuto-style partition of A[left..right] around the value at A[pivot].
 * On return, every element strictly less than the pivot value sits to the
 * left of the returned index, the pivot value sits at that index, and all
 * remaining elements sit to its right. The element swaps the original
 * performed through the swap() helper are written out inline here. */
static inline size_t partition( double * A, const size_t left, const size_t right, const size_t pivot) {
    const double pval = A[pivot];
    double held;
    /* park the pivot at the right end, out of the scan's way */
    held = A[right]; A[right] = A[pivot]; A[pivot] = held;
    size_t store = left;
    for (size_t scan = left; scan < right; scan++) {
        if (A[scan] < pval) {
            held = A[store]; A[store] = A[scan]; A[scan] = held;
            store++;
        }
    }
    /* drop the pivot into its final position */
    held = A[store]; A[store] = A[right]; A[right] = held;
    return store;
}
/* Quickselect: returns the k-th smallest element (0-based) of A[left..right].
 * A is partially reordered in place. Call with left = 0 and right = n-1 to
 * search all of A; k must satisfy left <= k <= right.
 * Cleanup over the original: the unused locals `i` and `n` were removed. */
static inline double quantile_quickSelect( double * A, size_t left, size_t right, const size_t k) {
    size_t pivot;
    while(1) {
        // 0. singleton range: the answer is forced
        if (left==right) return( A[left] );
        // 1. pick the middle element as the initial pivot
        pivot = (left+right)/2;
        // 2. partition A[left..right] around A[pivot]
        pivot = partition( A, left, right, pivot);
        if( k == pivot ) {
            return( A[k] );
        } else if( k < pivot ) {
            right = pivot -1;  /* target lies in the left part */
        } else {
            left = pivot +1;   /* target lies in the right part */
        }
    }
    /* unreachable; kept so every control path visibly returns a value */
    return(NAN);
}
/* integer max */
/* Larger of two ints. */
int intMax ( int x, int y) {
    if (x > y) return x;
    return y;
}
/* integer min */
/* Smaller of two ints. */
int intMin ( int x, int y) {
    if (x < y) return x;
    return y;
}
/* modal kernel */
/* Weighted modal (majority) filter kernel for one output pixel.
 *
 * x          : integer raster image, row-major, nRow x nCol
 * W          : precomputed dRow x dCol spatial weight window
 * i, j       : current pixel (row, column)
 * dRow, dCol : window dimensions
 * nRow, nCol : image dimensions
 *
 * Scans the window centered on (i,j), clipped at the image edges, sums the
 * spatial weight of every distinct non-negative value, and returns the value
 * with the largest total weight. Exact ties are broken uniformly at random
 * via R's runif(). Returns -1 when the window holds no non-negative values,
 * or when the scratch buffers cannot be allocated.
 *
 * Fixes over the original: the `x[..] == NAN` guard was removed -- x is an
 * int raster and an IEEE NaN comparison is always false, so the test could
 * never fire; missing data must be encoded as a negative value, which the
 * `>= 0` check below already skips. calloc results are now checked.
 */
double modalKernel(
    int * x, /* raster image */
    double * W, /* pre computed spatial weights */
    size_t i, /* current location in columns */
    size_t j, /* current location in rows */
    size_t dRow,
    size_t dCol,
    size_t nRow, /* number of Rows */
    size_t nCol /* number of Columns */
) {
    /* adjustment that must be applied for edge effects */
    size_t k, l;
    size_t M = 0;                /* number of distinct values collected */
    size_t m = 0;
    double maxValue = -INFINITY; /* largest accumulated weight so far */
    int mu = 0;
    size_t k_start;
    size_t k_stop;
    size_t l_start;
    size_t l_stop;
    size_t k_local;
    size_t l_local;
    int * maxArray = (int *) calloc( dRow * dCol, sizeof(int) );
    double * maxArrayValue = (double *) calloc( dRow * dCol, sizeof(double) );
    /* handle tie breaks */
    double tieBreak;
    double maxTie;
    /* robustness: bail out instead of dereferencing NULL on allocation failure */
    if( maxArray == NULL || maxArrayValue == NULL ) {
        free(maxArray);
        free(maxArrayValue);
        return( -1 );
    }
    maxTie = runif(0.0,1.0);
    /* the starts, clipped at the top/left image edge */
    if( i < dRow/2 ) {
        k_start = 0;
    } else {
        k_start = i - dRow/2 ;
    }
    if( j < dCol/2 ) {
        l_start = 0;
    } else {
        l_start = j - dCol/2 ;
    }
    /* the stops, clipped at the bottom/right image edge */
    if( i + dRow/2 + 1 > nRow ) {
        k_stop = nRow;
    } else {
        k_stop = i + dRow/2 + 1;
    }
    if( j + dCol/2 + 1 > nCol ) {
        l_stop = nCol;
    } else {
        l_stop = j + dCol/2 + 1;
    }
    /* (k_local, l_local) index into the weight window W as (k, l) walk the image */
    for(
        k=k_start,
        k_local=k_start - i + (dRow/2);
        k < k_stop;
        k++, k_local++
    ) {
        for(
            l=l_start,
            l_local=l_start -j + (dCol/2);
            l < l_stop;
            l++, l_local++
        ) {
            if( x[k*nCol + l] >= 0 ) { /* only run over non-negative values */
                for(m=0; m < M; m++) {
                    /* increment found values */
                    if( maxArray[m] == x[k*nCol + l] ) {
                        maxArrayValue[m] += W[ k_local*dCol + l_local];
                        break;
                    }
                }
                /* if the value is not found add it */
                if( m == M) {
                    maxArray[m] = x[k*nCol + l ];
                    maxArrayValue[m] = W[ k_local*dCol + l_local];
                    M++;
                }
            }
        }
    }
    /* handle the all NA case */
    if( M == 0 ) {
        free(maxArray);
        free(maxArrayValue);
        return( -1 ) ;
    }
    /* determine the value with the largest accumulated weight; exact ties
     * are broken uniformly at random so no class is systematically favored */
    for(m=0; m < M ; m++) {
        if( maxArrayValue[m] > maxValue ) {
            maxValue = maxArrayValue[m];
            mu = maxArray[m];
        } else if( maxArrayValue[m] == maxValue ) {
            tieBreak = runif(0.0, 1.0);
            if( tieBreak > maxTie ) {
                maxValue = maxArrayValue[m];
                mu = maxArray[m];
                maxTie = tieBreak;
            }
        }
    }
    free(maxArray);
    free(maxArrayValue);
    return( mu ) ;
}
/* quantile kernel */
/* Weighted quantile filter kernel for one output pixel.
 *
 * x          : image, row-major, nRow x nCol
 * W          : precomputed dRow x dCol spatial weight window
 * quantile   : requested quantile in [0, 1] (R "Type 1" definition)
 * i, j       : current pixel (row, column)
 * dRow, dCol : window dimensions
 * nRow, nCol : image dimensions
 *
 * Collects the pixels in the window centered on (i,j) (clipped at the image
 * edges) whose spatial weight is positive and returns their quantile.
 * Returns NAN when no pixel qualifies or allocation fails.
 *
 * Bug fix over the original: NaN pixels were "skipped" with
 * `x[..] == NAN`, which is always false under IEEE 754, so NaN values
 * leaked into the quantile computation. The guard now uses isnan().
 */
double quantileKernel(
    double * x, /* naip image */
    double * W, /* pre computed spatial weights */
    double quantile, /* quantile */
    size_t i, /* current location in columns */
    size_t j, /* current location in rows */
    size_t dRow,
    size_t dCol,
    size_t nRow, /* number of Rows */
    size_t nCol /* number of Columns */
) {
    /* adjustment that must be applied for edge effects */
    size_t k, l;
    size_t quantile_t; /* size_t quantile */
    size_t k_start;
    size_t k_stop;
    size_t l_start;
    size_t l_stop;
    double tmp;
    /* copy of the window data; mutated in place by quickselect below */
    double * tmpArray = (double *) calloc( dRow * dCol, sizeof(double) );
    double mu;
    int m = 0;
    size_t k_local;
    size_t l_local;
    /* robustness: bail out instead of dereferencing NULL on allocation failure */
    if( tmpArray == NULL ) {
        return( NAN );
    }
    /* the starts, clipped at the top/left image edge */
    if( i < dRow/2 ) {
        k_start = 0;
    } else {
        k_start = i - dRow/2 ;
    }
    if( j < dCol/2 ) {
        l_start = 0;
    } else {
        l_start = j - dCol/2 ;
    }
    /* the stops, clipped at the bottom/right image edge */
    if( i + dRow/2 + 1 > nRow ) {
        k_stop = nRow;
    } else {
        k_stop = i + dRow/2 + 1;
    }
    if( j + dCol/2 + 1 > nCol ) {
        l_stop = nCol;
    } else {
        l_stop = j + dCol/2 + 1;
    }
    /* (k_local, l_local) index into the weight window W as (k, l) walk the image */
    for(
        k=k_start,
        k_local=k_start - i + (dRow/2);
        k < k_stop;
        k++, k_local++
    ) {
        for(
            l=l_start,
            l_local=l_start -j + (dCol/2);
            l < l_stop;
            l++, l_local++
        ) {
            /* skip missing data (was `== NAN`, which never matched) */
            if( isnan(x[k*nCol + l]) ) continue;
            /* only consider elements with positive valued weights */
            if( W[ k_local*dCol + l_local] > 0 ) {
                tmpArray[m] = x[k*nCol + l];
                m++;
            }
        }
    }
    if ( m > 0) {
        /* get the index corresponding to the quantile */
        /* first take care of edge cases */
        if( quantile == 0.0 ) {
            mu = minDouble( tmpArray, m);
        } else if (quantile == 1.0) {
            mu = maxDouble( tmpArray, m);
        } else {
            /* per Type 1 definition: index floor(m*q) - 1 when m*q is an
             * integer, floor(m*q) otherwise */
            tmp = ((double) m) * quantile;
            if( fabs( tmp - floor( tmp ) ) == 0.0 ) { // does R use machine epsilon?
                quantile_t = ((size_t) tmp) -1;
            } else {
                quantile_t = ((size_t) tmp) ;
            }
            mu = quantile_quickSelect( tmpArray, 0, m-1, quantile_t);
        }
    } else {
        mu = NAN;
    }
    free(tmpArray);
    return( mu ) ;
}
/* generic kernel */
double meanKernel(
double * x, /* naip image */
double * var, /* */
double * W, /* pre computed spatial weights */
size_t i, /* current location in columns */
size_t j, /* current location in rows */
size_t dRow,
size_t dCol,
size_t nRow, /* number of Rows */
size_t nCol /* number of Columns */
) {
/* adjustment that must be applied for edge effects */
size_t k, l;
size_t k_start;
size_t k_stop;
size_t l_start;
size_t l_stop;
double w = 0; /* total weight, used to make weight adjustments */
double mu = 0;
size_t k_local;
size_t l_local;
/* the starts */
if( i < dRow/2 ) {
k_start = 0;
} else {
k_start = i - dRow/2 ;
}
if( j < dCol/2 ) {
l_start = 0;
} else {
l_start = j - dCol/2 ;
}
/* the stops */
if( i + dRow/2 + 1 > nRow ) {
k_stop = nRow;
} else {
k_stop = i + dRow/2 + 1;
}
if( j + dCol/2 + 1 > nCol ) {
l_stop = nCol;
} else {
l_stop = j + dCol/2 + 1;
}
if( x[i*nCol + j] == INFINITY ) return( INFINITY);
if( x[i*nCol + j] == -INFINITY ) return( -INFINITY);
if( x[i*nCol + j] == NAN ) return( NAN);
/* first pass variance */
for(
k=k_start,
k_local=k_start - i + (dRow/2);
k < k_stop;
k++, k_local++
) {
for(
l=l_start,
l_local=l_start -j + (dCol/2);
l < l_stop;
l++, l_local++
) {
if( x[k * nCol + l] == INFINITY ) continue;
if( x[k * nCol + l] == -INFINITY ) continue;
if( x[k * nCol + l] == NAN ) continue;
mu += x[k * nCol + l] * W[ k_local*dCol + l_local];
w += W[ k_local*dCol + l_local];
}
}
return( mu/w ) ;
}
/* generic kernel */
double gaussianKernel(
double * x, /* naip image */
double hInv, /* pre computed spatial weights */
size_t i, /* current location in columns */
size_t j, /* current location in rows */
size_t dRow,
size_t dCol,
size_t nRow, /* number of Rows */
size_t nCol /* number of Columns */
) {
/* adjustment that must be applied for edge effects */
size_t k, l;
size_t k_start;
size_t k_stop;
size_t l_start;
size_t l_stop;
double w = 0; /* total weight, used to make weight adjustments */
double w2 = 0;
double mu = 0;
/* the starts */
if( i < dRow/2 ) {
k_start = 0;
} else {
k_start = i - dRow/2 ;
}
if( j < dCol/2 ) {
l_start = 0;
} else {
l_start = j - dCol/2 ;
}
/* the stops */
if( i + dRow/2 + 1 > nRow ) {
k_stop = nRow;
} else {
k_stop = i + dRow/2 + 1;
}
if( j + dCol/2 + 1 > nCol ) {
l_stop = nCol;
} else {
l_stop = j + dCol/2 + 1;
}
if( x[i*nCol + j] == INFINITY ) return( INFINITY);
if( x[i*nCol + j] == -INFINITY ) return( -INFINITY);
if( x[i*nCol + j] == NAN ) return( NAN);
/* first pass variance */
for( k=k_start; k < k_stop; k++) {
for( l=l_start; l < l_stop; l++) {
if( x[k * nCol + l] == INFINITY ) continue;
if( x[k * nCol + l] == -INFINITY ) continue;
if( x[k * nCol + l] == NAN ) continue;
w = (x[k * nCol + l] - x[i * nCol + j]) *hInv;
w *= w;
mu += exp( -0.5 * w ) * 0.3989423 * hInv;
w2 += 1.0;
}
}
if( w2 > 0) mu = mu/w2;
return( mu ) ;
}
/* variance kernel: W-weighted variance of the window of x centred on
 * (i,j), taking deviations from the pre-smoothed mean surface mu at the
 * window centre.  Non-finite x or mu entries are skipped; a non-finite
 * centre pixel is returned unchanged. */
double varKernel(
  double * x, /* image, nRow x nCol, row major */
  double * mu, /* pre computed local mean surface, nRow x nCol */
  double * W, /* pre computed spatial weights, dRow x dCol */
  size_t i, /* current row index */
  size_t j, /* current column index */
  size_t dRow, /* window height */
  size_t dCol, /* window width */
  size_t nRow, /* number of Rows */
  size_t nCol /* number of Columns */
  ) {
  size_t k, l;
  size_t k_start;
  size_t k_stop;
  size_t l_start;
  size_t l_stop;
  double w = 0; /* total weight actually used */
  double var = 0; /* smoothed variance value we are going to return */
  double varTmp;
  size_t k_local; /* index into W, aligned with k */
  size_t l_local; /* index into W, aligned with l */
  /* the starts, clipped at the top/left edge */
  if( i < dRow/2 ) {
    k_start = 0;
  } else {
    k_start = i - dRow/2 ;
  }
  if( j < dCol/2 ) {
    l_start = 0;
  } else {
    l_start = j - dCol/2 ;
  }
  /* the stops, clipped at the bottom/right edge */
  if( i + dRow/2 + 1 > nRow ) {
    k_stop = nRow;
  } else {
    k_stop = i + dRow/2 + 1;
  }
  if( j + dCol/2 + 1 > nCol ) {
    l_stop = nCol;
  } else {
    l_stop = j + dCol/2 + 1;
  }
  /* correctly handle NAN and INF cases */
  if( x[i*nCol + j] == INFINITY ) return( INFINITY);
  if( x[i*nCol + j] == -INFINITY ) return( -INFINITY);
  /* BUG FIX: `x[..] == NAN` is always false (NaN never compares equal,
   * even to itself); use isnan() instead. */
  if( isnan( x[i*nCol + j] ) ) return( NAN);
  /*
   * k_start creates a link to the original data
   * k_local creates a link to the weights
   */
  // second pass for variance
  for(
    k=k_start,
    k_local=k_start - i + (dRow/2);
    k < k_stop;
    k++, k_local++
  ) {
    for(
      l=l_start,
      l_local=l_start - j + (dCol/2);
      l < l_stop;
      l++, l_local++
    ) {
      /* not mathematically correct, but good enough */
      if( x[k * nCol + l] == INFINITY ) continue;
      if( x[k * nCol + l] == -INFINITY ) continue;
      if( isnan( x[k * nCol + l] ) ) continue; /* BUG FIX: was == NAN */
      if( mu[k * nCol + l] == INFINITY ) continue;
      if( mu[k * nCol + l] == -INFINITY ) continue;
      if( isnan( mu[k * nCol + l] ) ) continue; /* BUG FIX: was == NAN */
      /* NOTE(review): the screen above tests mu[k][l] but the deviation
       * below uses mu[i][j] (the window centre); confirm that checking
       * the neighbour's mean is intended. */
      varTmp = x[k * nCol + l] - mu[i* nCol + j];
      var += varTmp * varTmp * W[ k_local*dCol + l_local];
      w += W[ k_local*dCol + l_local] ;
    }
  }
  return( var/w ) ;
}
/* rSmoothLocalMoments: fills mu with the WMu-weighted local mean of x
 * and, when *momentsPtr > 1, fills var with the local variance.  Rows are
 * processed in parallel with OpenMP.  Called from R, hence the
 * pointer-to-int parameters. */
void rSmoothLocalMoments(
  double * x, /* input image, row major */
  double * mu, /* this is the input/returned mu */
  double * var, /* this is the input/returned Var */
  double * WMu, /* weights for the mean pass */
  double * WVar, /* weights for the variance pass */
  int * nRowPtr,
  int * nColPtr,
  int * dRowPtr,
  int * dColPtr,
  int * momentsPtr /* number of moments to compute (mean only if <= 1) */
  ) {
  /* move R ints to size_t */
  size_t dRow = *dRowPtr;
  size_t dCol = *dColPtr;
  size_t nRow = *nRowPtr;
  size_t nCol = *nColPtr;
  size_t i,j;
  #pragma omp parallel for private(j)
  for( i=0; i < nRow; i++) {
    for( j=0; j < nCol; j++) {
      mu[i*nCol + j] = meanKernel( x, var, WMu, i,j,dRow,dCol,nRow,nCol);
    }
  }
  /* no explicit barrier needed: the parallel for above joins all threads
   * before execution continues (the stray `omp barrier` pragmas that were
   * here were no-ops outside a parallel region) */
  if( *momentsPtr > 1) {
    #pragma omp parallel for private(j)
    for( i=0; i < nRow; i++) {
      for( j=0; j < nCol; j++) {
        /* BUG FIX: the variance pass previously passed WMu, leaving the
         * WVar argument dead; use the variance weights supplied by the
         * caller. */
        var[i*nCol + j] = varKernel( x, mu, WVar, i,j,dRow,dCol,nRow,nCol);
      }
    }
  }
  return;
}
/* rSmoothCategorical: modal smoothing of an integer-labelled image.
 * Pixels with a negative label are copied through untouched; every other
 * pixel is replaced by the result of modalKernel over its neighbourhood.
 * Rows are processed in parallel with OpenMP.  Called from R, hence the
 * pointer-to-int parameters. */
void rSmoothCategorical(
  int * x, /* labelled input image, row major */
  int * mu, /* output: smoothed labels */
  double * WMu, /* pre computed spatial weights */
  int * nRowPtr,
  int * nColPtr,
  int * dRowPtr,
  int * dColPtr
  ) {
  /* widen the R-supplied ints once, up front */
  const size_t winRows = (size_t) *dRowPtr;
  const size_t winCols = (size_t) *dColPtr;
  const size_t rows = (size_t) *nRowPtr;
  const size_t cols = (size_t) *nColPtr;
  size_t r, c;
  #pragma omp parallel for private(c)
  for( r = 0; r < rows; r++ ) {
    for( c = 0; c < cols; c++ ) {
      const size_t at = r*cols + c;
      if( x[at] >= 0 ) {
        mu[at] = modalKernel( x, WMu, r, c, winRows, winCols, rows, cols );
      } else {
        /* negative labels pass through unchanged */
        mu[at] = x[at];
      }
    }
  }
  #pragma omp barrier
  return;
}
/* rSmoothLocalQuantile: fills mu with the local quantile of x over a
 * moving window, using the spatial weights WMu.  Rows are processed in
 * parallel with OpenMP.  Called from R, hence the pointer parameters. */
void rSmoothLocalQuantile(
  double * x, /* input image, row major */
  double * mu, /* output: local quantile surface */
  double * WMu, /* pre computed spatial weights */
  double * quantile, /* quantile level */
  int * nRowPtr,
  int * nColPtr,
  int * dRowPtr,
  int * dColPtr
  ) {
  /* widen the R-supplied ints once, up front */
  const size_t winRows = (size_t) *dRowPtr;
  const size_t winCols = (size_t) *dColPtr;
  const size_t rows = (size_t) *nRowPtr;
  const size_t cols = (size_t) *nColPtr;
  const double q = *quantile;
  size_t r, c;
  #pragma omp parallel for private(c)
  for( r = 0; r < rows; r++ ) {
    for( c = 0; c < cols; c++ ) {
      mu[r*cols + c] = quantileKernel( x, WMu, q, r, c, winRows, winCols, rows, cols );
    }
  }
  #pragma omp barrier
  return;
}
|
GB_binop__rdiv_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint64)
// A*D function (colscale): GB (_AxD__rdiv_uint64)
// D*A function (rowscale): GB (_DxB__rdiv_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint64)
// C=scalar+B GB (_bind1st__rdiv_uint64)
// C=scalar+B' GB (_bind1st_tran__rdiv_uint64)
// C=A+scalar GB (_bind2nd__rdiv_uint64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 64)
// operand (A,B) and result (C) types for the RDIV_UINT64 kernels
#define GB_ATYPE \
    uint64_t
#define GB_BTYPE \
    uint64_t
#define GB_CTYPE \
    uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
// C element accessor
#define GB_CX(p) Cx [p]
// binary operator: rdiv is division with the operands swapped, so the
// template's z=f(x,y) becomes GB_IDIV_UNSIGNED (y, x, 64)
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IDIV_UNSIGNED (y, x, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RDIV || GxB_NO_UINT64 || GxB_NO_RDIV_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; body comes from the
// included template, specialized by the macros defined above.
void GB (_Cdense_ewise3_accum__rdiv_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense.
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // GB_DISABLE is set when this operator/type is compiled out; the
    // caller then falls back to the generic kernel.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, sliced by
// B_ek_slicing into B_ntasks tasks over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__rdiv_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__rdiv_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above always returns;
    // harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale A by the diagonal matrix D.
GrB_Info GB (_AxD__rdiv_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is written by the included template
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale B by the diagonal matrix D.
GrB_Info GB (_DxB__rdiv_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is written by the included template
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with optional (possibly complemented)
// mask M; work is pre-sliced into TaskList by the caller.
GrB_Info GB (_AaddB__rdiv_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here is freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (general case).
GrB_Info GB (_AemultB_01__rdiv_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for rdiv (the flip was resolved by choosing rdiv
// over div), so only the non-flipped template branch is compiled.
GrB_Info GB (_AemultB_02__rdiv_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_03__rdiv_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__rdiv_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the operator with the scalar bound as the first
// operand; for rdiv this yields Cx [p] = GB_IDIV_UNSIGNED (bij, x, 64).
GrB_Info GB (_bind1st__rdiv_uint64)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (bij, x, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the operator with the scalar bound as the second
// operand; for rdiv this yields Cx [p] = GB_IDIV_UNSIGNED (y, aij, 64).
GrB_Info GB (_bind2nd__rdiv_uint64)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (y, aij, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 64) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar
// bound as the first operand.
GrB_Info GB (_bind1st_tran__rdiv_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 64) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar
// bound as the second operand.
GrB_Info GB (_bind2nd_tran__rdiv_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/identify.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/magick.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/segment.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  PixelInfo
    target[3],
    zero;
  RectangleInfo
    bounds;
  register const Quantum
    *r;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Start from an "empty" box: x/y at the far corner and width/height at 0.
    During the scan, width/height hold the maximum differing column/row
    (converted to true extents in the final adjustment below).
  */
  bounds.width=0;
  bounds.height=0;
  bounds.x=(ssize_t) image->columns;
  bounds.y=(ssize_t) image->rows;
  GetPixelInfo(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  /*
    Sample the border colors: target[0] is the top-left pixel (tested for
    the left and top edges), target[1] the top-right pixel (right edge),
    target[2] the bottom-left pixel (bottom edge).
  */
  r=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (r == (const Quantum *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  GetPixelInfoPixel(image,r,&target[0]);
  GetPixelInfo(image,&target[1]);
  r=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  if (r != (const Quantum *) NULL)
    GetPixelInfoPixel(image,r,&target[1]);
  GetPixelInfo(image,&target[2]);
  r=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  if (r != (const Quantum *) NULL)
    GetPixelInfoPixel(image,r,&target[2]);
  status=MagickTrue;
  GetPixelInfo(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;
    RectangleInfo
      bounding_box;
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    /*
      Take a per-thread snapshot of the shared bounds under the same
      critical section that protects the merge below.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#   pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,p,&pixel);
      /*
        A pixel that differs (beyond the fuzz factor) from the relevant
        border color extends the candidate box.
      */
      if ((x < bounding_box.x) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p+=GetPixelChannels(image);
    }
    /*
      Merge this row's candidate box into the shared result.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#   pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) && (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /*
        width/height currently hold the maximum differing column/row;
        convert them to extents relative to the box origin (bounds.x,
        bounds.y).
      */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDepth() returns the depth of a particular image channel.
%
% The format of the GetImageDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
size_t
*current_depth,
depth,
number_threads;
ssize_t
y;
/*
Compute image depth.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
current_depth=(size_t *) AcquireQuantumMemory(number_threads,
sizeof(*current_depth));
if (current_depth == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
status=MagickTrue;
for (i=0; i < (ssize_t) number_threads; i++)
current_depth[i]=1;
if ((image->storage_class == PseudoClass) &&
(image->alpha_trait == UndefinedPixelTrait))
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
if ((image->colors) > 256) \
num_threads(GetMagickResourceLimit(ThreadResource))
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
const int
id = GetOpenMPThreadId();
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((atDepth != MagickFalse) &&
(GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(image->colormap[i].red,range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse))
break;
current_depth[id]++;
}
}
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
if (QuantumRange <= MaxMap)
{
size_t
*depth_map;
/*
Scale pixels to desired (optimized with depth map).
*/
depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
{
unsigned int
depth;
for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
{
Quantum
pixel;
QuantumAny
range;
range=GetQuantumRange(depth);
pixel=(Quantum) i;
if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
break;
}
depth_map[i]=depth;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(channel == IndexPixelChannel) ||
(channel == ReadMaskPixelChannel) ||
(channel == MetaPixelChannel))
continue;
if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(p[i])];
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
depth_map=(size_t *) RelinquishMagickMemory(depth_map);
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
#endif
/*
Compute pixel depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) || (channel == IndexPixelChannel) ||
(channel == ReadMaskPixelChannel))
continue;
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
QuantumAny
range;
range=GetQuantumRange(current_depth[id]);
if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range))
break;
current_depth[id]++;
}
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded up to the
% nearest legal quantum depth: 8, 16, 32, or 64.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  register ssize_t
    i;

  size_t
    depth;

  static const size_t
    quantum_depths[] = { 8, 16, 32, 64 };

  /*
    Round the image depth up to the smallest legal quantum depth able to
    hold it; a depth beyond 64 is left untouched.  When constrain is set,
    clamp the result to the depth this build was compiled with.
  */
  depth=image->depth;
  for (i=0; i < (ssize_t) (sizeof(quantum_depths)/sizeof(*quantum_depths)); i++)
    if (depth <= quantum_depths[i])
      {
        depth=quantum_depths[i];
        break;
      }
  if ((constrain != MagickFalse) && (depth > MAGICKCORE_QUANTUM_DEPTH))
    depth=(size_t) MAGICKCORE_QUANTUM_DEPTH;
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
  MagickBooleanType
    has_alpha;

  /*
    Classify the image from its current colorspace, cached type flags, and
    palette status; the *Alpha variant of each type is chosen whenever an
    alpha channel is active.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  has_alpha=(image->alpha_trait != UndefinedPixelTrait) ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IsPaletteImage(image) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level if the intensity of
% every pixel is either 0 or QuantumRange.  Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    pixel_type;

  ssize_t
    y;

  /*
    Scan every pixel: start assuming bi-level, downgrade to grayscale on the
    first non-monochrome gray pixel, and bail out with UndefinedType on the
    first non-gray pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);  /* cached classification is already gray */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  pixel_type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (pixel_type != UndefinedType); y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(image,p) == MagickFalse)
        {
          pixel_type=UndefinedType;
          break;
        }
      if ((pixel_type == BilevelType) &&
          (IsPixelMonochrome(image,p) == MagickFalse))
        pixel_type=GrayscaleType;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((pixel_type == GrayscaleType) &&
      (image->alpha_trait != UndefinedPixelTrait))
    pixel_type=GrayscaleAlphaType;
  return(pixel_type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  ssize_t
    y;

  /*
    Scan every pixel and report MagickTrue only if each one is strictly
    black or white; the scan stops at the first counter-example.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);  /* cached classification is already bi-level */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (bilevel != MagickFalse); y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(image,p) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  return(bilevel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  MagickBooleanType
    has_alpha;

  /*
    Determine the image's potential type by inspecting the pixels themselves
    (unlike GetImageType(), which trusts cached flags); the *Alpha variant
    of each type is chosen whenever an alpha channel is active.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  has_alpha=(image->alpha_trait != UndefinedPixelTrait) ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageGray() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsImageGray method is:
%
% MagickBooleanType IsImageGray(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
  /*
    Grayscale test based purely on the cached image type: bi-level and both
    grayscale variants count as gray.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (image->type)
  {
    case BilevelType:
    case GrayscaleType:
    case GrayscaleAlphaType:
      return(MagickTrue);
    default:
      return(MagickFalse);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageMonochrome() returns MagickTrue if type of the image is bi-level.
%
% The format of the IsImageMonochrome method is:
%
% MagickBooleanType IsImageMonochrome(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  /*
    Monochrome test based purely on the cached image type.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  return(image->type == BilevelType ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O p a q u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageOpaque() returns MagickTrue if none of the pixels in the image have
% an alpha value other than OpaqueAlpha (QuantumRange).
%
% Will return true immediately if the alpha channel is not available.
%
% The format of the IsImageOpaque method is:
%
% MagickBooleanType IsImageOpaque(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    opaque;

  ssize_t
    y;

  /*
    An image without an alpha channel is opaque by definition; otherwise
    every pixel is scanned until an alpha value other than OpaqueAlpha (or a
    row that cannot be read) is found.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  opaque=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (opaque != MagickFalse); y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        /* unreadable row: report non-opaque, matching the early-exit path */
        opaque=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelAlpha(image,p) != OpaqueAlpha)
        {
          opaque=MagickFalse;
          break;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  return(opaque);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageDepth() sets the depth of the image.
%
% The format of the SetImageDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o depth: the image depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageDepth() reduces the effective depth of the image to `depth' bits
  by requantizing every updatable channel (and, for PseudoClass images, the
  colormap entries) through a scale-down/scale-up round trip at that depth.
  If the requested depth meets or exceeds the build's quantum depth the
  pixel data is already representable and only image->depth is recorded.

  Returns MagickTrue on success; MagickFalse if any pixel row could not be
  read or synced back.
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      /*
        Quantum storage already carries at least `depth' bits; no pixel
        needs to change.
      */
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);  /* largest value representable at depth */
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Requantize the colormap entries of palette images first.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if (QuantumRange <= MaxMap)
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired depth (optimized with a precomputed
        quantum -> requantized-quantum lookup table; only possible when the
        quantum range is small enough to enumerate).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;  /* another row already failed; skip remaining work */
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          if (GetPixelWriteMask(image,q) == 0)
            {
              /* pixel is write-protected: leave it untouched */
              q+=GetPixelChannels(image);
              continue;
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (channel == IndexPixelChannel) ||
                (channel == ReadMaskPixelChannel))
              continue;  /* bookkeeping channels are not requantized */
            q[i]=depth_map[ScaleQuantumToMap(q[i])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth (general path: recompute the round trip
    per sample).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) == 0)
        {
          /* pixel is write-protected: leave it untouched */
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) || (channel == IndexPixelChannel) ||
            (channel == ReadMaskPixelChannel))
          continue;  /* bookkeeping channels are not requantized */
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
% OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageType() converts the image so it matches the requested ImageType,
  adjusting colorspace, storage class, alpha channel, and (for palette
  types) quantizing the colors.  The image's dither setting, optionally
  overridden by a "dither" artifact, is propagated into the quantization
  options.

  Returns MagickTrue on success, MagickFalse if any conversion step failed.
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /* a per-image "dither" artifact overrides the image's dither flag */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      if (SetImageMonochrome(image,exception) == MagickFalse)
        {
          /*
            Not already monochrome: convert to gray, stretch contrast, then
            quantize down to a two-color grayscale palette.
          */
          status=TransformImageColorspace(image,GRAYColorspace,exception);
          (void) NormalizeImage(image,exception);
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=2;
          quantize_info->colorspace=GRAYColorspace;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;  /* bi-level has no alpha */
      break;
    }
    case GrayscaleType:
    {
      if (SetImageGray(image,exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      if (SetImageGray(image,exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace,exception);
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          /* reduce to at most 256 colors so a colormap can represent it */
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      /* threshold only the alpha channel to on/off, then quantize colors */
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      if (image->colorspace != CMYKColorspace)
        {
          /* route through sRGB first so the CMYK transform is well-defined */
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            status=TransformImageColorspace(image,sRGBColorspace,exception);
          status=TransformImageColorspace(image,CMYKColorspace,exception);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      if (image->colorspace != CMYKColorspace)
        {
          /* route through sRGB first so the CMYK transform is well-defined */
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            status=TransformImageColorspace(image,sRGBColorspace,exception);
          status=TransformImageColorspace(image,CMYKColorspace,exception);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;  /* nothing to convert */
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(status);
  image->type=type;  /* record the new classification only on success */
  return(MagickTrue);
}
|
smart_sieve.c | #include "../include/smart_sieve.h"
#include "../include/linear_algebra.h"
#include "../include/matrix.h"
/*
 * smart_sieve() -- sieving stage of a quadratic-sieve factorization of n.
 *
 * Each MPI worker sieves Q(A) = (A + s)^2 - n, with s = floor(sqrt(n)), for
 * A in [begin, begin + interval); OpenMP threads split that range evenly
 * and walk it in blocks of block_size values.  Whenever a Q(A) factors
 * completely over factor_base, its exponent vector and the value (A + s)
 * are sent to rank 0 (tags ROW_TAG / AS_TAG).  A non-blocking receive from
 * rank 0 (tag 0) is polled so workers can stop once enough relations exist.
 *
 * Parameters:
 *   n            number being factored.
 *   factor_base  primes over which Q(A) is sieved.
 *   base_dim     number of primes in factor_base.
 *   solutions    per-prime roots of Q(A) == 0 (mod p), relative to A = 0.
 *   begin        first A value assigned to this process.
 *   interval     width of this process's A range.
 *   block_size   sieving block width (bounds the per-thread work arrays).
 *   max_fact     extra relations requested beyond base_dim.
 *
 * Returns non-zero when rank 0 signalled that sieving may stop.
 *
 * Fixes over the previous revision: `last_block' was mpz_init'd but never
 * cleared, and the per-thread `solutions_' copies (and the array itself)
 * were never released -- both leaked on every call, once per thread.
 */
unsigned int smart_sieve(mpz_t n,
    unsigned int* factor_base,
    unsigned int base_dim,
    pair* solutions,
    mpz_t begin,
    unsigned int interval,
    unsigned int block_size,
    unsigned int max_fact) {

  mpz_t end;
  mpz_init(end);
  mpz_add_ui(end, begin, interval); /* this process sieves A in [begin, end) */

  mpz_t n_root;
  mpz_init(n_root);
  mpz_sqrt(n_root, n); /* s = floor(sqrt(n)) */

  int stop_flag = 0;
  char stop_signal;
  MPI_Request request;
  MPI_Status status;

  /* Non-blocking "enough relations, stop sieving" message from rank 0. */
  MPI_Irecv(&stop_signal, 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &request);

  /* Everything below runs once per OpenMP thread. */
  #pragma omp parallel
  {
    /* Transmission buffers. */
    unsigned int* buffer;              /* one exponent row per relation */
    init_vector(&buffer, base_dim);    /* NOTE(review): old comment said
                                          base_dim+1 for an end flag, but
                                          only base_dim entries are used --
                                          confirm against the receiver */
    unsigned char* buffer_as;          /* exported (A + s) bytes */
    buffer_as = malloc(sizeof(unsigned char) * BUFFER_DIM);

    /* Per-block result collection. */
    unsigned int** exponents;          /* scratch exponent matrix */
    init_matrix(&exponents, block_size, base_dim);
    word** used_rows;                  /* bitset: row already zero-initialized */
    init_matrix_l(&used_rows, 1, (block_size / N_BITS) + 1);
    mpz_t* evaluated_poly;             /* Q(A) values, divided down in place */
    init_vector_mpz(&evaluated_poly, block_size);
    mpz_t* As;                         /* the matching (A + s) values */
    init_vector_mpz(&As, block_size);

    /* This thread sieves A in [begin_thread, end_thread). */
    mpz_t begin_thread;
    mpz_init(begin_thread);
    mpz_t end_thread;
    mpz_init(end_thread);
    mpz_t last_block;
    mpz_init(last_block);
    mpz_t end_block;
    mpz_init(end_block);
    mpz_t intermed;                    /* scratch value */
    mpz_init(intermed);
    mpz_t A;                           /* the A of Q(A) = (A + s)^2 - n */
    mpz_init(A);
    mpz_t l;                           /* current block start */
    mpz_init(l);
    mpz_t j;                           /* current sieve position */
    mpz_init(j);
    mpz_t begin_solution1;
    mpz_init(begin_solution1);
    mpz_t begin_solution2;
    mpz_init(begin_solution2);

    /* Q(A) lives at index (A - block offset), always < block_size. */
    mpz_t index_mpz;
    mpz_init(index_mpz);
    unsigned int index;

    int n_bytes;                       /* bytes in the exported (A + s) */
    unsigned int i;                    /* generic index */
    unsigned char go_on = 1;
    unsigned int h;                    /* reserved for exponent copies */
    unsigned int fact_count = 0;       /* factorizations found by this thread */
    unsigned long k;                   /* generic index */

    /***************************************************************************/

    max_fact += base_dim;              /* the k + n relations to collect */

    int threads = omp_get_num_threads();
    int thread_id = omp_get_thread_num();
    unsigned int dom_decomp = interval / (threads);

    /* Static domain decomposition of [begin, begin + interval). */
    mpz_add_ui(begin_thread, begin, dom_decomp * thread_id);
    mpz_add_ui(end_thread, begin_thread, dom_decomp);

    /*
      Take a private arbitrary-precision copy of the sieve roots and advance
      each one to the first position >= begin_thread.
    */
    mpz_pair * solutions_ = malloc(sizeof(mpz_pair) * base_dim);
    for(i = 0; i < base_dim; ++i) {
      mpz_init(solutions_[i].sol1);
      mpz_init(solutions_[i].sol2);
      mpz_set_ui(solutions_[i].sol1, solutions[i].sol1);
      mpz_set_ui(solutions_[i].sol2, solutions[i].sol2);
    }
    if(mpz_cmp_ui(begin_thread, 0) != 0)
      for(i = 0; i < base_dim; ++i) {
        while(mpz_cmp(solutions_[i].sol1, begin_thread) < 0)
          mpz_add_ui(solutions_[i].sol1, solutions_[i].sol1, factor_base[i]);
        while(mpz_cmp(solutions_[i].sol2, begin_thread) < 0)
          mpz_add_ui(solutions_[i].sol2, solutions_[i].sol2, factor_base[i]);
      }

    mpz_sub_ui(last_block, end_thread, block_size); /* last full block start */

    /* for(l = begin_thread; l < last_block && go_on; l += block_size) */
    for(mpz_set(l, begin_thread);
        (mpz_cmp(l, last_block) < 0) && go_on && !stop_flag;
        mpz_add_ui(l, l, block_size)) {

      for(i = 0; i < ((block_size / N_BITS) + 1); ++i) { /* reset used rows */
        set_matrix_l(used_rows, 0, i, 0);
      }

      mpz_add_ui(end_block, l, block_size); /* block is [l, end_block) */

      /* Evaluate Q(A) and (A + s) for every A in the block. */
      for(i = 0; i < block_size; ++i) {
        mpz_add_ui(A, l, i);              /* A = l + i */
        mpz_add(intermed, n_root, A);     /* A + s */
        mpz_set(As[i], intermed);
        mpz_mul(intermed, intermed, intermed); /* (A + s)^2 */
        mpz_sub(evaluated_poly[i], intermed, n);
      }

      for(i = 0; i < base_dim && go_on && !stop_flag; ++i) {

        /* Sieve with the first root of p. */
        for(mpz_set(j, solutions_[i].sol1);
            (mpz_cmp(j, end_block) < 0) && go_on && !stop_flag;
            mpz_add_ui(j, j, factor_base[i])) {
          mpz_sub(index_mpz, j, l);
          index = mpz_get_ui(index_mpz); /* safe: (j - l) < block_size */
          while(mpz_divisible_ui_p(evaluated_poly[index], factor_base[i])) {
            if(get_k_i(used_rows, 0, index) == 0) { /* lazily zero the row */
              for(k = 0; k < base_dim; ++k)
                set_matrix(exponents, index, k, 0);
              set_k_i(used_rows, 0, index, 1);
            }
            set_matrix(exponents, index, i, get_matrix(exponents, index, i) + 1);
            mpz_divexact_ui(evaluated_poly[index], evaluated_poly[index],
                factor_base[i]); /* Q(A) /= p */
          }
        }
        /* Resume from here in the next block. */
        mpz_set(solutions_[i].sol1, j);

        /* Sieve with the second root of p (coincides with the first for
           p == 2, so it is skipped there). */
        for(mpz_set(j, solutions_[i].sol2);
            factor_base[i] != 2 && (mpz_cmp(j, end_block) < 0) && go_on && !stop_flag;
            mpz_add_ui(j, j, factor_base[i])) {
          mpz_sub(index_mpz, j, l);
          index = mpz_get_ui(index_mpz); /* safe: (j - l) < block_size */
          while(mpz_divisible_ui_p(evaluated_poly[index], factor_base[i])) {
            if(get_k_i(used_rows, 0, index) == 0) { /* lazily zero the row */
              for(k = 0; k < base_dim; ++k)
                set_matrix(exponents, index, k, 0);
              set_k_i(used_rows, 0, index, 1);
            }
            set_matrix(exponents, index, i, get_matrix(exponents, index, i) + 1);
            mpz_divexact_ui(evaluated_poly[index], evaluated_poly[index],
                factor_base[i]); /* Q(A) /= p */
          }
        }
        /* Resume from here in the next block. */
        mpz_set(solutions_[i].sol2, j);
      }

      /* Ship the complete factorizations found in this block to rank 0. */
      for(i = 0; i < block_size; ++i) {
        if(mpz_cmp_ui(evaluated_poly[i], 1) == 0) { /* fully factored */
          ++fact_count;
          for(k = 0; k < base_dim; ++k)
            buffer[k] = get_matrix(exponents, i, k);
          /* MPI is not assumed THREAD_MULTIPLE: serialize the sends. */
          #pragma omp critical
          {
            MPI_Send(buffer, base_dim, MPI_UNSIGNED, 0, ROW_TAG,
                MPI_COMM_WORLD);
            n_bytes = (mpz_sizeinbase(As[i], 2) + 7) / 8;
            *buffer_as = 0; /* mpz_export writes nothing for a zero value */
            mpz_export(buffer_as, NULL, 1, 1, 1, 0, As[i]);
            MPI_Send(buffer_as, n_bytes, MPI_UNSIGNED_CHAR, 0, AS_TAG,
                MPI_COMM_WORLD);
            if(stop_flag == 0)
              MPI_Test(&request, &stop_flag, &status);
          }
        }
      }
    }

    /* Per-thread cleanup. */
    mpz_clear(begin_thread);
    mpz_clear(end_thread);
    mpz_clear(last_block); /* was leaked before */
    mpz_clear(end_block);
    mpz_clear(intermed);
    mpz_clear(A);
    mpz_clear(l);
    mpz_clear(j);
    mpz_clear(begin_solution1);
    mpz_clear(begin_solution2);
    mpz_clear(index_mpz);
    for(i = 0; i < base_dim; ++i) { /* was leaked before */
      mpz_clear(solutions_[i].sol1);
      mpz_clear(solutions_[i].sol2);
    }
    free(solutions_);
    free(buffer);
    free(buffer_as);
    finalize_matrix(&exponents, base_dim); /* NOTE(review): matrix was built
                                              with block_size rows; confirm
                                              finalize_matrix's second
                                              argument is meant to be the
                                              column count here */
    finalize_matrix_l(&used_rows, (block_size / N_BITS) + 1);
    for(unsigned int u = 0; u < block_size; u++) {
      mpz_clear(evaluated_poly[u]);
      mpz_clear(As[u]);
    }
    /* NOTE(review): the arrays allocated by init_vector_mpz() themselves are
       not freed here -- confirm whether the helper retains ownership. */
  }

  mpz_clear(end);
  mpz_clear(n_root);

  return stop_flag;
}
|
_spmv_G_NROWS_34000_G_NCOLS_34000_B_NROWS_4_B_NCOLS_MIN_12_B_NCOLS_MAX_20_B_NCOLS_STRIDE_4.c | /*@ begin PerfTuning (
def build {
arg build_command = 'icc -O3 -openmp -lm -I/disks/fast/papi/include -L/disks/fast/papi/lib -lpapi';
}
def performance_counter {
arg repetitions = 100;
}
def performance_params {
param UNROLL_FAC_OUT[] = [4];
param UNROLL_FAC_IN[] = [1,2,3,4,5,6,7,8];
param N_THREADS[] = [1,4];
param SIMD_TYPE[] = ['none','sse'];
param BLK_TYPE[] = ['inode'];
constraint simd_unroll_factor = (SIMD_TYPE=='none' or UNROLL_FAC_IN%2==0);
}
def input_params {
param G_NROWS[] = [32000,34000];
param G_NCOLS[] = [32000,34000];
param B_NROWS[] = [4];
param B_NCOLS_MIN[] = [12];
param B_NCOLS_MAX[] = [20];
param B_NCOLS_STRIDE[] = [4];
constraint square_x_y = (G_NROWS==G_NCOLS);
}
def input_vars {
arg decl_file = 'decl_code.h';
arg init_file = 'init_code.c';
}
def performance_test_code {
arg skeleton_code_file = 'skeleton_code.c';
}
def search
{
arg algorithm = 'Exhaustive';
}
) @*/
/**-- (Generated by Orio)
Best performance cost:
453325.000000
Tuned for specific problem sizes:
B_NCOLS_MAX = 20
B_NCOLS_MIN = 12
B_NCOLS_STRIDE = 4
B_NROWS = 4
G_NCOLS = 34000
G_NROWS = 34000
Best performance parameters:
BLK_TYPE = inode
N_THREADS = 4
SIMD_TYPE = none
UNROLL_FAC_IN = 6
UNROLL_FAC_OUT = 4
--**/
/*@ begin SpMV (
# SpMV computation: y = y + aa * x;
out_vector = y;
in_vector = x;
in_matrix = aa;
row_inds = ai;
col_inds = aj;
data_type = double;
init_val = 0;
total_rows = total_rows;
total_inodes = total_inodes;
inode_sizes = inode_sizes;
inode_rows = inode_rows;
# transformation parameters
out_unroll_factor = UNROLL_FAC_OUT;
in_unroll_factor = UNROLL_FAC_IN;
num_threads = N_THREADS;
simd = SIMD_TYPE; # 'none' (default), 'gcc', 'sse', 'xlc'
block_structure = BLK_TYPE; # 'none' (default), 'inode', 'bcsr' (still unsupported)
) @*/
{
  /* Orio-generated SpMV kernel: y = aa * x over an inode-blocked CSR matrix.
     Consecutive rows with identical column patterns form an "inode" (the four
     rows of a block below share one ajc[] index stream), so x-values are loaded
     once and reused for all rows of the block.
     Tuned parameters (see header above): N_THREADS=4, UNROLL_FAC_OUT=4,
     UNROLL_FAC_IN=6, SIMD=none. */
  register int n;
  omp_set_num_threads(4);
  #pragma omp parallel for shared(y,x,aa,ai,aj,total_inodes,inode_rows) private(n)
  for (n=0; n<=total_inodes-1; n+=1) {
    int start_row=inode_rows[n];                     /* first row of this inode */
    register int rlength=inode_rows[n+1]-start_row;  /* number of rows in inode */
    int first_col=ai[start_row];                     /* CSR offset of first row */
    register int clength=ai[start_row+1]-first_col;  /* nonzeros per row (same for all rows of the inode) */
    double *yc=&y[start_row];
    int *ajc=&aj[first_col];
    double *aac=&aa[first_col];
    register int i=0;
    /* Main loop: process 4 rows at a time (outer unroll factor 4). */
    while (i<=rlength-4) {
      /* aa pointers for rows 1..3 of the block; row 0 uses aac. */
      double *aa1c=aac+clength,*aa2c=aa1c+clength,*aa3c=aa2c+clength;
      double y0c=0,y1c=0,y2c=0,y3c=0;
      register int j=0;
      /* Inner loop unrolled by 6: each x value is reused by all 4 rows. */
      while (j<=clength-6) {
        double x0=x[ajc[0]],x1=x[ajc[1]],x2=x[ajc[2]],x3=x[ajc[3]],x4=x[ajc[4]],x5=x[ajc[5]];
        y0c += aac[0]*x0 + aac[1]*x1 + aac[2]*x2 + aac[3]*x3 + aac[4]*x4 + aac[5]*x5;
        y1c += aa1c[0]*x0 + aa1c[1]*x1 + aa1c[2]*x2 + aa1c[3]*x3 + aa1c[4]*x4 + aa1c[5]*x5;
        y2c += aa2c[0]*x0 + aa2c[1]*x1 + aa2c[2]*x2 + aa2c[3]*x3 + aa2c[4]*x4 + aa2c[5]*x5;
        y3c += aa3c[0]*x0 + aa3c[1]*x1 + aa3c[2]*x2 + aa3c[3]*x3 + aa3c[4]*x4 + aa3c[5]*x5;
        aac+=6; aa1c+=6; aa2c+=6; aa3c+=6;
        ajc+=6;
        j+=6;
      }
      /* Remainder columns (clength not a multiple of 6). */
      while (j<=clength-1) {
        double x0=x[ajc[0]];
        y0c += aac[0]*x0;
        y1c += aa1c[0]*x0;
        y2c += aa2c[0]*x0;
        y3c += aa3c[0]*x0;
        aac+=1; aa1c+=1; aa2c+=1; aa3c+=1;
        ajc+=1;
        j+=1;
      }
      yc[0]=y0c; yc[1]=y1c; yc[2]=y2c; yc[3]=y3c;
      yc+=4;
      /* Skip past the 3 extra rows processed in this block. */
      aac=aa3c;
      ajc+=3*clength;
      i+=4;
    }
    /* Remainder rows (rlength not a multiple of 4), one at a time. */
    while (i<=rlength-1) {
      double y0c=0;
      register int j=0;
      while (j<=clength-6) {
        y0c += aac[0]*x[ajc[0]] + aac[1]*x[ajc[1]] + aac[2]*x[ajc[2]] + aac[3]*x[ajc[3]] + aac[4]*x[ajc[4]] + aac[5]*x[ajc[5]];
        aac+=6;
        ajc+=6;
        j+=6;
      }
      while (j<=clength-1) {
        y0c += aac[0]*x[ajc[0]];
        aac+=1;
        ajc+=1;
        j+=1;
      }
      yc[0]=y0c;
      yc+=1;
      i+=1;
    }
  }
}
/*@ end @*/
/*@ end @*/
|
/* Emits "\nWorld" on stdout as a side effect and always yields 1. */
int foo () {
  static const char message[] = "\nWorld";
  fputs(message, stdout);
  return 1;
}
/* Identity on int: hands its argument straight back. */
int bar(int value) { return value; }
int main () {
  int i = 10;  // NOTE(review): unused — presumably left over; confirm before removing
  // The if clause calls foo() (prints "\nWorld", returns 1) and passes the
  // result through bar (identity), so the clause is nonzero and the region
  // runs in parallel; each thread in the team prints "\nHello".
  #pragma omp parallel if (bar(foo()))
  {
    printf("\nHello");
  }
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 *
 * Mirrors the classic glibc manual example: *y is normalized in place
 * (callers must not rely on it being unchanged), and result->tv_usec is
 * guaranteed non-negative on return.
 *
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microsecond field so that
     x->tv_usec >= y->tv_usec holds for the subtraction below. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds out of y in the other direction. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* The remaining difference; tv_usec is certainly positive here. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Driver for the order-1 3D 7-point stencil (PLUTO/CLooG time-tiled version).
 * Usage: prog Nx Ny Nz Nt — grid sizes get +2 for halo planes.
 * Runs the tiled sweep TESTS times and reports the best wall time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  // NOTE(review): Nx/Ny/Nz stay uninitialized when argc <= 3, and Nt when
  // argc <= 4 — the benchmark assumes all four arguments are always given.
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // Two time planes (A[0], A[1]) of a Nz x Ny x Nx grid, allocated row-by-row.
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  // (tile sizes: 16 x 16 x 32 x 2048, terminated by -1).
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 32;
  tile_size[3] = 2048;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // stencil coefficients
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize variables
  // NOTE(review): loops start at 1, so the index-0 planes of A[0] (and all of
  // A[1]) are left uninitialized even though the sweep reads [idx-1] — verify
  // this is acceptable for the benchmark.
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  #ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
  #endif
  int num_threads = 1;
  #if defined(_OPENMP)
  num_threads = omp_get_max_threads();
  #endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    The GNU C Library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 2.1 of the License, or (at your option) any later version.
    The GNU C Library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    Lesser General Public License for more details.
    You should have received a copy of the GNU Lesser General Public
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
    include it implicitly at the start of every compilation. It must
    not itself include <features.h> or any other header that includes
    <features.h> because the implicit include comes before any feature
    test macros that may be defined in a source file before it first
    explicitly includes a system header. GCC knows the name of this
    header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
    and complex. If the GCC (4.9 and later) predefined macros
    specifying compiler intent are available, use them to determine
    whether the overall intent is to support these features; otherwise,
    presume an older compiler has intent to support these features and
    define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
    Unicode 6.0. */
    /* We do not support C11 <threads.h>. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    /* Machine-generated time-skewed, tiled loop nest: t1 walks time tiles,
       t2 (parallel) walks skewed z-tiles, t3/t4 the y/x tiles, and
       t5..t8 are the intra-tile time/z/y/x points. Do not hand-edit the
       bounds — they come from the polyhedral schedule. */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,8);t1++) {
        lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
        ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
        #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(8*t1+Ny+13,32)),floord(16*t2+Ny+12,32)),floord(16*t1-16*t2+Nz+Ny+11,32));t3++) {
            for (t4=max(max(max(0,ceild(t1-255,256)),ceild(16*t2-Nz-2044,2048)),ceild(32*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(8*t1+Nx+13,2048)),floord(16*t2+Nx+12,2048)),floord(32*t3+Nx+28,2048)),floord(16*t1-16*t2+Nz+Nx+11,2048));t4++) {
              for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),32*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),32*t3+30),2048*t4+2046),16*t1-16*t2+Nz+13);t5++) {
                for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                    lbv=max(2048*t4,t5+1);
                    ubv=min(2048*t4+2047,t5+Nx-2);
                    #pragma ivdep
                    #pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* 7-point update: new = alpha*center + beta*(sum of 6 face neighbors). */
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
  #ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
  #endif
  // Free allocated arrays (skipped: freeing here caused measurable
  // performance degradation at exit, so the OS reclaims the memory).
  /* for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
|
par_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterp
*--------------------------------------------------------------------------*/
/* Build the classical interpolation operator P for BoomerAMG.
 *
 * For each C-point, interpolation is the identity; for each F-point the
 * weights are computed from the strong C-neighbors (from S), with strong
 * F-neighbor connections distributed among common C-neighbors and weak
 * connections lumped into the diagonal.
 *
 * A               fine-grid matrix (ParCSR)
 * CF_marker       per-row C/F splitting (>=0: C-point, <0: F-point; -3
 *                 entries are reset to -1 on exit)
 * S               strength-of-connection matrix
 * num_cpts_global global coarse-point partitioning
 * num_functions / dof_func   system-AMG variable blocking
 * trunc_factor / max_elmts   truncation of small / excess weights in P
 * col_offd_S_to_A optional map from S's offd columns to A's (may be NULL)
 * P_ptr           output: the interpolation matrix
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_BoomerAMGBuildInterp( hypre_ParCSRMatrix *A,
                            HYPRE_Int *CF_marker,
                            hypre_ParCSRMatrix *S,
                            HYPRE_BigInt *num_cpts_global,
                            HYPRE_Int num_functions,
                            HYPRE_Int *dof_func,
                            HYPRE_Int debug_flag,
                            HYPRE_Real trunc_factor,
                            HYPRE_Int max_elmts,
                            HYPRE_Int *col_offd_S_to_A,
                            hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;
   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data = NULL;
   HYPRE_Int *A_ext_i = NULL;
   HYPRE_BigInt *A_ext_j = NULL;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int *P_marker, *P_marker_offd;
   HYPRE_Int jj_counter,jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   HYPRE_Int jj_begin_row,jj_begin_row_offd;
   HYPRE_Int jj_end_row,jj_end_row_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int strong_f_marker;
   HYPRE_Int *fine_to_coarse;
   //HYPRE_Int *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int i,i1,i2;
   HYPRE_Int j,jl,jj,jj1;
   HYPRE_Int kc;
   HYPRE_BigInt big_k;
   HYPRE_Int start;
   HYPRE_Int sgn;
   HYPRE_Int c_num;
   HYPRE_Real diagonal;
   HYPRE_Real sum;
   HYPRE_Real distribute;
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int print_level = 0;
   HYPRE_Int *int_buf_data;
   HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
   HYPRE_Real wall_time; /* for debugging instrumentation */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
   //my_first_cpt = num_cpts_global[0];
   /* Last rank owns the global coarse count; broadcast it to everyone. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif
   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
      print_level = 1;
   }
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   }
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);
   /* Pack local CF_marker values for the rows other ranks need. */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /* Same exchange for dof_func in the systems (num_functions > 1) case. */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*----------------------------------------------------------------------
    * Get the ghost rows of A
    *---------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_procs > 1)
   {
      A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
   }
   /* Re-index A_ext columns in place: columns owned locally become local
    * indices (big_k - col_1); other columns found in col_map_offd are
    * encoded as the negative value (-kc-1) so the sign distinguishes the
    * diag/offd block in the second pass below. Unfound columns drop out. */
   index = 0;
   for (i=0; i < num_cols_A_offd; i++)
   {
      for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
      {
         big_k = A_ext_j[j];
         if (big_k >= col_1 && big_k < col_n)
         {
            A_ext_j[index] = big_k - col_1;
            A_ext_data[index++] = A_ext_data[j];
         }
         else
         {
            kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
            if (kc > -1)
            {
               A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
               A_ext_data[index++] = A_ext_data[j];
            }
         }
      }
      A_ext_i[i] = index;
   }
   /* Shift row pointers down by one to restore the CSR convention. */
   for (i = num_cols_A_offd; i > 0; i--)
      A_ext_i[i] = A_ext_i[i-1];
   if (num_procs > 1) A_ext_i[0] = 0;
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   /* RDF: this looks a little tricky, but doable */
   /* Rows are partitioned into num_threads contiguous chunks; each thread
    * counts its own C-points and nonzeros into per-thread slots, which are
    * prefix-summed after the loop. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }
            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
            }
         }
      }
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /* Prefix-sum the per-thread counters to get global sizes/offsets. */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];
   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_i[n_fine] = jj_counter;
   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   /* Shift each thread's local coarse numbering by the count of coarse
    * points in all preceding chunks, making fine_to_coarse globally
    * consistent on this rank. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
      //fine_to_coarse[i] += my_first_cpt+coarse_shift;
   }
   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   /*#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   #endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; */
   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
   /* Second pass: fill P row by row; each thread resumes its chunk at the
    * nonzero offsets computed in the first pass. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;
      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }
      strong_f_marker = -2;
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
               /*--------------------------------------------------------------
                * If neighbor i1 is an F-point, mark it as a strong F-point
                * whose connection needs to be distributed.
                *--------------------------------------------------------------*/
               else if (CF_marker[i1] != -3)
               {
                  P_marker[i1] = strong_f_marker;
               }
            }
            jj_end_row = jj_counter;
            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;
            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/
                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                        /* store the local offd column index for now; it is
                         * compressed/renumbered after the main loop */
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                     /*-----------------------------------------------------------
                      * If neighbor i1 is an F-point, mark it as a strong F-point
                      * whose connection needs to be distributed.
                      *-----------------------------------------------------------*/
                     else if (CF_marker_offd[i1] != -3)
                     {
                        P_marker_offd[i1] = strong_f_marker;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/
                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                     /*-----------------------------------------------------------
                      * If neighbor i1 is an F-point, mark it as a strong F-point
                      * whose connection needs to be distributed.
                      *-----------------------------------------------------------*/
                     else if (CF_marker_offd[i1] != -3)
                     {
                        P_marker_offd[i1] = strong_f_marker;
                     }
                  }
               }
            }
            jj_end_row_offd = jj_counter_offd;
            diagonal = A_diag_data[A_diag_i[i]];
            /* Loop over ith row of A. First, the diagonal part of A */
            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];
               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }
               /*--------------------------------------------------------------
                * Case 2: neighbor i1 is an F-point and strongly influences i,
                * distribute a_{i,i1} to C-points that strongly infuence i.
                * Note: currently no distribution to the diagonal in this case.
                *--------------------------------------------------------------*/
               else if (P_marker[i1] == strong_f_marker)
               {
                  sum = zero;
                  /*-----------------------------------------------------------
                   * Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i.
                   *-----------------------------------------------------------*/
                  sgn = 1;
                  if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Diagonal block part of row i1 */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row &&
                         (sgn*A_diag_data[jj1]) < 0)
                     {
                        sum += A_diag_data[jj1];
                     }
                  }
                  /* Off-Diagonal block part of row i1 */
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd
                            && (sgn*A_offd_data[jj1]) < 0)
                        {
                           sum += A_offd_data[jj1];
                        }
                     }
                  }
                  if (sum != 0)
                  {
                     distribute = A_diag_data[jj] / sum;
                     /*-----------------------------------------------------------
                      * Loop over row of A for point i1 and do the distribution.
                      *-----------------------------------------------------------*/
                     /* Diagonal block part of row i1 */
                     for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if (P_marker[i2] >= jj_begin_row
                            && (sgn*A_diag_data[jj1]) < 0)
                        {
                           P_diag_data[P_marker[i2]]
                              += distribute * A_diag_data[jj1];
                        }
                     }
                     /* Off-Diagonal block part of row i1 */
                     if (num_procs > 1)
                     {
                        for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if (P_marker_offd[i2] >= jj_begin_row_offd
                               && (sgn*A_offd_data[jj1]) < 0)
                           {
                              P_offd_data[P_marker_offd[i2]]
                                 += distribute * A_offd_data[jj1];
                           }
                        }
                     }
                  }
                  else
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func[i1])
                     {
                        diagonal += A_diag_data[jj];
                     }
                  }
               }
               /*--------------------------------------------------------------
                * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                * into the diagonal.
                *--------------------------------------------------------------*/
               else if (CF_marker[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func[i1])
                  {
                     diagonal += A_diag_data[jj];
                  }
               }
            }
            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/
                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  }
                  /*------------------------------------------------------------
                   * Case 2: neighbor i1 is an F-point and strongly influences i,
                   * distribute a_{i,i1} to C-points that strongly infuence i.
                   * Note: currently no distribution to the diagonal in this case.
                   *-----------------------------------------------------------*/
                  else if (P_marker_offd[i1] == strong_f_marker)
                  {
                     sum = zero;
                     /*---------------------------------------------------------
                      * Loop over row of A_ext for point i1 and calculate the sum
                      * of the connections to c-points that strongly influence i.
                      *---------------------------------------------------------*/
                     /* find row number */
                     c_num = A_offd_j[jj];
                     sgn = 1;
                     if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
                     /* A_ext_j entries: i2 > -1 is a local (diag-block) column,
                      * negative encodes offd column -i2-1 (see re-index above). */
                     for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                     {
                        i2 = (HYPRE_Int)A_ext_j[jj1];
                        if (i2 > -1)
                        {
                           /* in the diagonal block */
                           if (P_marker[i2] >= jj_begin_row
                               && (sgn*A_ext_data[jj1]) < 0)
                           {
                              sum += A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           /* in the off_diagonal block */
                           if (P_marker_offd[-i2-1] >= jj_begin_row_offd
                               && (sgn*A_ext_data[jj1]) < 0)
                           {
                              sum += A_ext_data[jj1];
                           }
                        }
                     }
                     if (sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;
                        /*---------------------------------------------------------
                         * Loop over row of A_ext for point i1 and do
                         * the distribution.
                         *--------------------------------------------------------*/
                        /* Diagonal block part of row i1 */
                        for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                        {
                           i2 = (HYPRE_Int)A_ext_j[jj1];
                           if (i2 > -1) /* in the diagonal block */
                           {
                              if (P_marker[i2] >= jj_begin_row
                                  && (sgn*A_ext_data[jj1]) < 0)
                              {
                                 P_diag_data[P_marker[i2]]
                                    += distribute * A_ext_data[jj1];
                              }
                           }
                           else
                           {
                              /* in the off_diagonal block */
                              if (P_marker_offd[-i2-1] >= jj_begin_row_offd
                                  && (sgn*A_ext_data[jj1]) < 0)
                                 P_offd_data[P_marker_offd[-i2-1]]
                                    += distribute * A_ext_data[jj1];
                           }
                        }
                     }
                     else
                     {
                        if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                        {
                           diagonal += A_offd_data[jj];
                        }
                     }
                  }
                  /*-----------------------------------------------------------
                   * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                   * into the diagonal.
                   *-----------------------------------------------------------*/
                  else if (CF_marker_offd[i1] != -3)
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                     {
                        diagonal += A_offd_data[jj];
                     }
                  }
               }
            }
            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/
            if (diagonal == 0.0)
            {
               if (print_level)
               {
                  hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
               }
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] = 0.0;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] = 0.0;
               }
            }
            else
            {
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] /= -diagonal;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] /= -diagonal;
               }
            }
         }
         /* Use a fresh marker value for the next row so this row's
          * strong-F entries in P_marker cannot be mistaken for it. */
         strong_f_marker--;
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }
   /* Compress P_offd's column space: keep only columns actually used,
    * and renumber P_offd_j from local A-offd indices to P-offd indices. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
      {
         P_marker[i] = 0;
      }
      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }
   for (i=0; i < n_fine; i++)
   {
      if (CF_marker[i] == -3) CF_marker[i] = -1;
   }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);
   *P_ptr = P;
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
   if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpHE
* interpolation routine for hyperbolic PDEs
* treats weak fine connections like strong fine connections
*--------------------------------------------------------------------------*/
/**
 * Builds the classical-modified interpolation operator P for hyperbolic PDEs.
 * Identical in structure to hypre_BoomerAMGBuildInterp, except that weak fine
 * connections are treated like strong fine connections (no weak-connection
 * lumping into the diagonal on a separate path).
 *
 * @param A                fine-grid operator (ParCSR)
 * @param CF_marker        C/F splitting: >= 0 for C-points, < 0 for F-points;
 *                         entries equal to -3 are reset to -1 on exit
 * @param S                strength matrix (same row partitioning as A)
 * @param num_cpts_global  global coarse-point partitioning
 * @param num_functions    number of functions (for systems of PDEs)
 * @param dof_func         function index of each dof (may be NULL if num_functions == 1)
 * @param debug_flag       4 enables timing printout
 * @param trunc_factor     relative truncation tolerance for P
 * @param max_elmts        max nonzeros per row of P (0 = unlimited)
 * @param col_offd_S_to_A  map from S off-diag columns to A off-diag columns (may be NULL)
 * @param P_ptr            output: the interpolation matrix
 *
 * @return hypre_error_flag
 */
HYPRE_Int
hypre_BoomerAMGBuildInterpHE( hypre_ParCSRMatrix   *A,
                              HYPRE_Int            *CF_marker,
                              hypre_ParCSRMatrix   *S,
                              HYPRE_BigInt         *num_cpts_global,
                              HYPRE_Int             num_functions,
                              HYPRE_Int            *dof_func,
                              HYPRE_Int             debug_flag,
                              HYPRE_Real            trunc_factor,
                              HYPRE_Int             max_elmts,
                              HYPRE_Int            *col_offd_S_to_A,
                              hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);

   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt       *col_map_offd_P;
   HYPRE_Int          *tmp_map_offd = NULL;

   HYPRE_Int       *CF_marker_offd = NULL;
   HYPRE_Int       *dof_func_offd = NULL;

   /* Ghost rows of A; only allocated/used when num_procs > 1. Initialize to
      NULL so the variable is never read uninitialized. */
   hypre_CSRMatrix *A_ext = NULL;
   HYPRE_Real      *A_ext_data = NULL;
   HYPRE_Int       *A_ext_i = NULL;
   HYPRE_BigInt    *A_ext_j = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real      *P_diag_data;
   HYPRE_Int       *P_diag_i;
   HYPRE_Int       *P_diag_j;
   HYPRE_Real      *P_offd_data;
   HYPRE_Int       *P_offd_i;
   HYPRE_Int       *P_offd_j;

   HYPRE_Int        P_diag_size, P_offd_size;

   HYPRE_Int       *P_marker, *P_marker_offd;

   HYPRE_Int        jj_counter, jj_counter_offd;
   HYPRE_Int       *jj_count, *jj_count_offd;
   HYPRE_Int        jj_begin_row, jj_begin_row_offd;
   HYPRE_Int        jj_end_row, jj_end_row_offd;

   HYPRE_Int        start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int       *fine_to_coarse;
   //HYPRE_Int *fine_to_coarse_offd;
   HYPRE_Int       *coarse_counter;
   HYPRE_Int        coarse_shift;
   HYPRE_BigInt     total_global_cpts;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int        num_cols_P_offd;

   HYPRE_Int        i, i1, i2;
   HYPRE_Int        j, jl, jj, jj1;
   HYPRE_Int        kc;
   HYPRE_BigInt     big_k;
   HYPRE_Int        start;
   HYPRE_Int        sgn;
   HYPRE_Int        c_num;

   HYPRE_Real       diagonal;
   HYPRE_Real       sum;
   HYPRE_Real       distribute;

   HYPRE_Real       zero = 0.0;
   HYPRE_Real       one = 1.0;

   HYPRE_Int        my_id;
   HYPRE_Int        num_procs;
   HYPRE_Int        num_threads;
   HYPRE_Int        num_sends;
   HYPRE_Int        index;
   HYPRE_Int        ns, ne, size, rest;
   HYPRE_Int       *int_buf_data;

   HYPRE_BigInt     col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt     col_n = col_1 + local_numrows;

   HYPRE_Real       wall_time;  /* for debugging instrumentation  */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   num_threads = hypre_NumThreads();

#ifdef HYPRE_NO_GLOBAL_PARTITION
   //my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs - 1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
#else
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag == 4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);

   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         int_buf_data[index++]
            = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of A
    *---------------------------------------------------------------------*/

   if (debug_flag == 4) wall_time = time_getWallclockSeconds();

   if (num_procs > 1)
   {
      A_ext      = hypre_ParCSRMatrixExtractBExt(A, A, 1);
      A_ext_i    = hypre_CSRMatrixI(A_ext);
      A_ext_j    = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
   }

   /* Compress A_ext columns in place: local columns become 0-based local
      indices; off-diagonal columns are encoded as (-kc-1) into col_map_offd;
      columns not found are dropped. */
   index = 0;
   for (i = 0; i < num_cols_A_offd; i++)
   {
      for (j = A_ext_i[i]; j < A_ext_i[i + 1]; j++)
      {
         big_k = A_ext_j[j];
         if (big_k >= col_1 && big_k < col_n)
         {
            A_ext_j[index] = big_k - col_1;
            A_ext_data[index++] = A_ext_data[j];
         }
         else
         {
            kc = hypre_BigBinarySearch(col_map_offd, big_k, num_cols_A_offd);
            if (kc > -1)
            {
               A_ext_j[index] = (HYPRE_BigInt)(-kc - 1);
               A_ext_data[index++] = A_ext_data[j];
            }
         }
      }
      A_ext_i[i] = index;
   }
   for (i = num_cols_A_offd; i > 0; i--)
      A_ext_i[i] = A_ext_i[i - 1];
   if (num_procs > 1) A_ext_i[0] = 0;

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d  Interp: Comm 2   Get A_ext =  %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;

      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a C-point, interpolation is the identity. Also set up
          *  mapping vector.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }

         /*--------------------------------------------------------------------
          *  If i is an F-point, interpolation is from the C-points that
          *  strongly influence i.
          *--------------------------------------------------------------------*/

         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/

   for (i = 0; i < num_threads - 1; i++)
   {
      coarse_counter[i + 1] += coarse_counter[i];
      jj_count[i + 1] += jj_count[i];
      jj_count_offd[i + 1] += jj_count_offd[i];
   }
   i = num_threads - 1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag == 4) wall_time = time_getWallclockSeconds();

   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j - 1];
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
        fine_to_coarse_offd);

   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
                    my_id, wall_time);
      fflush(NULL);
   }*/

   if (debug_flag == 4) wall_time = time_getWallclockSeconds();

   /*#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   #endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (jl < rest)
      {
         ns = jl * size + jl;
         ne = (jl + 1) * size + jl + 1;
      }
      else
      {
         ns = jl * size + rest;
         ne = (jl + 1) * size + rest;
      }
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl - 1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl - 1];

      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          *  If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/

         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;

            for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/

               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
            jj_end_row = jj_counter;

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;

            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];

                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/

                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd]    = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
                  {
                     i1 = S_offd_j[jj];

                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/

                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd]    = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
               }
            }

            jj_end_row_offd = jj_counter_offd;

            diagonal = A_diag_data[A_diag_i[i]];

            /* Loop over ith row of A.  First, the diagonal part of A */

            for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];

               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/

               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }

               /*--------------------------------------------------------------
                * Case 2: neighbor i1 is an F-point and influences i,
                * distribute a_{i,i1} to C-points that strongly influence i.
                * Note: currently no distribution to the diagonal in this case.
                *--------------------------------------------------------------*/

               else
               {
                  sum = zero;

                  /*-----------------------------------------------------------
                   * Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i.
                   *-----------------------------------------------------------*/
                  sgn = 1;
                  if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Diagonal block part of row i1 */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row &&
                         (sgn * A_diag_data[jj1]) < 0)
                     {
                        sum += A_diag_data[jj1];
                     }
                  }

                  /* Off-Diagonal block part of row i1 */
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd
                            && (sgn * A_offd_data[jj1]) < 0)
                        {
                           sum += A_offd_data[jj1];
                        }
                     }
                  }

                  if (sum != 0)
                  {
                     distribute = A_diag_data[jj] / sum;

                     /*-----------------------------------------------------------
                      * Loop over row of A for point i1 and do the distribution.
                      *-----------------------------------------------------------*/

                     /* Diagonal block part of row i1 */
                     for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if (P_marker[i2] >= jj_begin_row
                            && (sgn * A_diag_data[jj1]) < 0)
                        {
                           P_diag_data[P_marker[i2]]
                              += distribute * A_diag_data[jj1];
                        }
                     }

                     /* Off-Diagonal block part of row i1 */
                     if (num_procs > 1)
                     {
                        for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if (P_marker_offd[i2] >= jj_begin_row_offd
                               && (sgn * A_offd_data[jj1]) < 0)
                           {
                              P_offd_data[P_marker_offd[i2]]
                                 += distribute * A_offd_data[jj1];
                           }
                        }
                     }
                  }
                  else
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func[i1])
                        diagonal += A_diag_data[jj];
                  }
               }
            }

            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];

                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/

                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  }

                  /*------------------------------------------------------------
                   * Case 2: neighbor i1 is an F-point and influences i,
                   * distribute a_{i,i1} to C-points that strongly influence i.
                   * Note: currently no distribution to the diagonal in this case.
                   *-----------------------------------------------------------*/

                  else
                  {
                     sum = zero;

                     /*---------------------------------------------------------
                      * Loop over row of A_ext for point i1 and calculate the sum
                      * of the connections to c-points that strongly influence i.
                      *---------------------------------------------------------*/

                     /* find row number */
                     c_num = A_offd_j[jj];

                     sgn = 1;
                     if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
                     for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)
                     {
                        i2 = (HYPRE_Int)A_ext_j[jj1];

                        if (i2 > -1)
                        {
                           /* in the diagonal block */
                           if (P_marker[i2] >= jj_begin_row
                               && (sgn * A_ext_data[jj1]) < 0)
                           {
                              sum += A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           /* in the off_diagonal block  */
                           if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd
                               && (sgn * A_ext_data[jj1]) < 0)
                           {
                              sum += A_ext_data[jj1];
                           }
                        }
                     }

                     if (sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;

                        /*---------------------------------------------------------
                         * Loop over row of A_ext for point i1 and do
                         * the distribution.
                         *--------------------------------------------------------*/

                        /* Diagonal block part of row i1 */
                        for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num + 1]; jj1++)
                        {
                           i2 = (HYPRE_Int)A_ext_j[jj1];

                           if (i2 > -1) /* in the diagonal block */
                           {
                              if (P_marker[i2] >= jj_begin_row
                                  && (sgn * A_ext_data[jj1]) < 0)
                              {
                                 P_diag_data[P_marker[i2]]
                                    += distribute * A_ext_data[jj1];
                              }
                           }
                           else
                           {
                              /* in the off_diagonal block  */
                              if (P_marker_offd[-i2 - 1] >= jj_begin_row_offd
                                  && (sgn * A_ext_data[jj1]) < 0)
                                 P_offd_data[P_marker_offd[-i2 - 1]]
                                    += distribute * A_ext_data[jj1];
                           }
                        }
                     }
                     else
                     {
                        if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                           diagonal += A_offd_data[jj];
                     }
                  }
               }
            }

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/

            /* BUG FIX: guard against a zero diagonal before dividing, the same
             * way hypre_BoomerAMGBuildInterp does; otherwise the row of P is
             * filled with NaN/Inf weights. The row is zeroed instead. */
            if (diagonal == 0.0)
            {
               if (debug_flag)
               {
                  hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id, i);
               }
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] = 0.0;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] = 0.0;
               }
            }
            else
            {
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] /= -diagonal;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] /= -diagonal;
               }
            }
         }

         P_offd_i[i + 1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */

   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* Build the compressed off-diagonal column map of P. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i = 0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      index = 0;
      for (i = 0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index] == 0) index++;
         tmp_map_offd[i] = index++;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   for (i = 0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(A_ext);
   }

   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildDirInterpHost
 * host (CPU) implementation of direct interpolation
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildDirInterpHost( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real diagonal;
HYPRE_Real sum_N_pos, sum_P_pos;
HYPRE_Real sum_N_neg, sum_P_neg;
HYPRE_Real alfa = 1.0;
HYPRE_Real beta = 1.0;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd,sum_P_pos,sum_P_neg,sum_N_pos,sum_N_neg,alfa,beta) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
HYPRE_Int *P_marker, *P_marker_offd;
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
sum_N_pos = 0;
sum_N_neg = 0;
sum_P_pos = 0;
sum_P_neg = 0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (num_functions == 1 || dof_func[i1] == dof_func[i])
{
if (A_diag_data[jj] > 0)
sum_N_pos += A_diag_data[jj];
else
sum_N_neg += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
if (A_diag_data[jj] > 0)
sum_P_pos += A_diag_data[jj];
else
sum_P_neg += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])
{
if (A_offd_data[jj] > 0)
sum_N_pos += A_offd_data[jj];
else
sum_N_neg += A_offd_data[jj];
}
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
if (A_offd_data[jj] > 0)
sum_P_pos += A_offd_data[jj];
else
sum_P_neg += A_offd_data[jj];
}
}
}
if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;
if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
if (P_diag_data[jj]> 0)
P_diag_data[jj] *= -beta;
else
P_diag_data[jj] *= -alfa;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
if (P_offd_data[jj]> 0)
P_offd_data[jj] *= -beta;
else
P_offd_data[jj] *= -alfa;
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
HYPRE_Int *P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGBuildDirInterp( hypre_ParCSRMatrix   *A,
                               HYPRE_Int            *CF_marker,
                               hypre_ParCSRMatrix   *S,
                               HYPRE_BigInt         *num_cpts_global,
                               HYPRE_Int             num_functions,
                               HYPRE_Int            *dof_func,
                               HYPRE_Int             debug_flag,
                               HYPRE_Real            trunc_factor,
                               HYPRE_Int             max_elmts,
                               HYPRE_Int            *col_offd_S_to_A,
                               HYPRE_Int             interp_type,
                               hypre_ParCSRMatrix  **P_ptr)
{
   /* Build direct interpolation P; dispatches to the device kernel when A
      lives in device memory, otherwise to the host implementation.  The
      result is returned through P_ptr; the return value is an error code. */
#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPushRange("DirInterp");
#endif

   HYPRE_Int err_code = 0;

#if defined(HYPRE_USING_CUDA)
   /* Choose execution policy from the memory location of A. */
   if ( hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ) == HYPRE_EXEC_DEVICE )
   {
      err_code = hypre_BoomerAMGBuildDirInterpDevice(A, CF_marker, S, num_cpts_global,
                                                     num_functions, dof_func, debug_flag,
                                                     trunc_factor, max_elmts,
                                                     col_offd_S_to_A, interp_type, P_ptr);
   }
   else
#endif
   {
      /* Host path (note: the host routine does not take interp_type). */
      err_code = hypre_BoomerAMGBuildDirInterpHost(A, CF_marker, S, num_cpts_global,
                                                   num_functions, dof_func, debug_flag,
                                                   trunc_factor, max_elmts,
                                                   col_offd_S_to_A, P_ptr);
   }

#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPopRange();
#endif

   return err_code;
}
/*------------------------------------------------
 * Drop small entries in the interpolation matrix P.
 * max_elmts == 0 means no limit on the number of
 * nonzeros kept per row.
 *------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
                                 HYPRE_Real trunc_factor,
                                 HYPRE_Int max_elmts)
{
   /* Nothing requested: no drop threshold and no per-row nonzero cap. */
   if (trunc_factor <= 0.0 && max_elmts == 0)
   {
      return 0;
   }

#if defined(HYPRE_USING_CUDA)
   /* Run on the device when P resides in device memory. */
   if ( hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(P) ) == HYPRE_EXEC_DEVICE )
   {
      return hypre_BoomerAMGInterpTruncationDevice(P, trunc_factor, max_elmts);
   }
   else
#endif
   {
      /* Host path: drop entries below trunc_factor relative to the row's
         infinity norm (nrm_type = 0) and rescale the remaining row entries
         so row sums are preserved (rescale = 1). */
      return hypre_ParCSRMatrixTruncate(P, trunc_factor, max_elmts,
                                        /* rescale */ 1, /* nrm_type */ 0);
   }
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpModUnk - modified classical interpolation for the
 * unknown-based (unknown approach) systems AMG. The strength matrix S passed
 * in must be built on the entire matrix, not on a single unknown's block.
 *--------------------------------------------------------------------------*/
/*
 * Builds the prolongation operator P for the unknown-based systems approach.
 *
 * Inputs:
 *   A               - fine-grid matrix (ParCSR)
 *   CF_marker       - C/F splitting per fine row (>= 0: C-point, < 0: F-point,
 *                     -3: special F-point excluded from interpolation)
 *   S               - strength matrix built on the entire matrix
 *   num_cpts_global - global coarse-point partitioning info
 *   num_functions   - number of unknowns (functions) per node
 *   dof_func        - function/unknown index for each dof (used when
 *                     num_functions > 1 to restrict distribution to the same
 *                     function type)
 *   debug_flag      - timing/debug output control (negative enables warnings)
 *   trunc_factor    - relative drop tolerance for P truncation
 *   max_elmts       - max nonzeros kept per row of P (0 = unlimited)
 *   col_offd_S_to_A - map from S's off-diag columns to A's (may be NULL)
 * Output:
 *   *P_ptr          - the constructed interpolation matrix
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_BoomerAMGBuildInterpModUnk( hypre_ParCSRMatrix   *A,
                         HYPRE_Int            *CF_marker,
                         hypre_ParCSRMatrix   *S,
                         HYPRE_BigInt         *num_cpts_global,
                         HYPRE_Int             num_functions,
                         HYPRE_Int            *dof_func,
                         HYPRE_Int             debug_flag,
                         HYPRE_Real            trunc_factor,
                         HYPRE_Int             max_elmts,
                         HYPRE_Int            *col_offd_S_to_A,
                         hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;
   /* Ghost rows of A (rows owned by other procs that we need locally). */
   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data = NULL;
   HYPRE_Int *A_ext_i = NULL;
   HYPRE_BigInt *A_ext_j = NULL;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int *P_marker, *P_marker_offd;
   HYPRE_Int jj_counter,jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   HYPRE_Int jj_begin_row,jj_begin_row_offd;
   HYPRE_Int jj_end_row,jj_end_row_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int strong_f_marker;
   HYPRE_Int *fine_to_coarse;
   //HYPRE_Int *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int i,i1,i2;
   HYPRE_Int j,jl,jj,jj1;
   HYPRE_Int kc;
   HYPRE_BigInt big_k;
   HYPRE_Int start;
   HYPRE_Int sgn;
   HYPRE_Int c_num;
   HYPRE_Real diagonal;
   HYPRE_Real sum;
   HYPRE_Real distribute;
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int print_level = 0;
   HYPRE_Int *int_buf_data;
   /* [col_1, col_n) is the global column range owned by this process. */
   HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt col_n = col_1 + local_numrows;
   HYPRE_Real wall_time; /* for debugging instrumentation */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();
   /* Determine the global number of coarse points; with no global
      partition the last rank knows it and broadcasts to all. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   //my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif
   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
      print_level = 1;
   }
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                           num_sends), HYPRE_MEMORY_HOST);
   /* Pack CF_marker values for the rows other procs need, then exchange. */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /* Same exchange for the dof/function indices when there are several
      unknowns per node. */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*----------------------------------------------------------------------
    * Get the ghost rows of A
    *---------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_procs > 1)
   {
      A_ext      = hypre_ParCSRMatrixExtractBExt(A,A,1);
      A_ext_i    = hypre_CSRMatrixI(A_ext);
      A_ext_j    = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
   }
   /* Compress A_ext in place, re-encoding its global column indices:
      columns we own become local diag indices (big_k - col_1); columns in
      our off-diag map are stored as negative values (-kc-1); all other
      columns are dropped. */
   index = 0;
   for (i=0; i < num_cols_A_offd; i++)
   {
      for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
      {
         big_k = A_ext_j[j];
         if (big_k >= col_1 && big_k < col_n)
         {
            A_ext_j[index] = big_k - col_1;
            A_ext_data[index++] = A_ext_data[j];
         }
         else
         {
            kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
            if (kc > -1)
            {
               A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
               A_ext_data[index++] = A_ext_data[j];
            }
         }
      }
      A_ext_i[i] = index;
   }
   /* Shift the (now end-position) counters back into row-pointer form. */
   for (i = num_cols_A_offd; i > 0; i--)
      A_ext_i[i] = A_ext_i[i-1];
   if (num_procs > 1) A_ext_i[0] = 0;
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d  Interp: Comm 2   Get A_ext =  %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   /* RDF: this looks a little tricky, but doable */
   /* Each thread j counts nonzeros for its contiguous row range [ns, ne);
      the first `rest` threads get one extra row each. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }
            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
            }
         }
      }
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /* Prefix-sum the per-thread counts so each thread knows its offset. */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];
   P_diag_size = jj_counter;
   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_i[n_fine] = jj_counter;
   P_offd_size = jj_counter_offd;
   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   /* Add each thread's coarse-point offset so fine_to_coarse becomes a
      globally consistent local coarse numbering. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
   }
   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   /*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/
   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      /* Each thread starts writing P at the offset the prefix sum gave it. */
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;
      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }
      /* strong_f_marker tags strong F-neighbors of the current row; it is
         decremented after every row so stale tags never match. */
      strong_f_marker = -2;
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
               /*--------------------------------------------------------------
                * If neighbor i1 is an F-point, mark it as a strong F-point
                * whose connection needs to be distributed.
                *--------------------------------------------------------------*/
               else if (CF_marker[i1] != -3)
               {
                  P_marker[i1] = strong_f_marker;
               }
            }
            jj_end_row = jj_counter;
            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;
            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/
                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                        P_offd_j[jj_counter_offd]  = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                     /*-----------------------------------------------------------
                      * If neighbor i1 is an F-point, mark it as a strong F-point
                      * whose connection needs to be distributed.
                      *-----------------------------------------------------------*/
                     else if (CF_marker_offd[i1] != -3)
                     {
                        P_marker_offd[i1] = strong_f_marker;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/
                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                        P_offd_j[jj_counter_offd]  = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                     /*-----------------------------------------------------------
                      * If neighbor i1 is an F-point, mark it as a strong F-point
                      * whose connection needs to be distributed.
                      *-----------------------------------------------------------*/
                     else if (CF_marker_offd[i1] != -3)
                     {
                        P_marker_offd[i1] = strong_f_marker;
                     }
                  }
               }
            }
            jj_end_row_offd = jj_counter_offd;
            diagonal = A_diag_data[A_diag_i[i]];
            /* Loop over ith row of A.  First, the diagonal part of A */
            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];
               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }
               /*--------------------------------------------------------------
                * Case 2: neighbor i1 is an F-point and strongly influences i,
                * distribute a_{i,i1} to C-points that strongly influence i.
                * Note: currently no distribution to the diagonal in this case.
                * HERE, we only want to distribute to points of the SAME
                * function type.
                *--------------------------------------------------------------*/
               else if (P_marker[i1] == strong_f_marker)
               {
                  sum = zero;
                  /*-----------------------------------------------------------
                   * Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i.
                   *-----------------------------------------------------------*/
                  /* Only connections whose sign is opposite to i1's diagonal
                     take part in the distribution. */
                  sgn = 1;
                  if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Diagonal block part of row i1 */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                     {
                        if (P_marker[i2] >= jj_begin_row &&
                            (sgn*A_diag_data[jj1]) < 0 )
                        {
                           sum += A_diag_data[jj1];
                        }
                     }
                  }
                  /* Off-Diagonal block part of row i1 */
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                        {
                           if (P_marker_offd[i2] >= jj_begin_row_offd
                               && (sgn*A_offd_data[jj1]) < 0)
                           {
                              sum += A_offd_data[jj1];
                           }
                        }
                     }
                  }
                  if (sum != 0)
                  {
                     distribute = A_diag_data[jj] / sum;
                     /*-----------------------------------------------------------
                      * Loop over row of A for point i1 and do the distribution.
                      *-----------------------------------------------------------*/
                     /* Diagonal block part of row i1 */
                     for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                        {
                           if (P_marker[i2] >= jj_begin_row
                               && (sgn*A_diag_data[jj1]) < 0)
                           {
                              P_diag_data[P_marker[i2]]
                                 += distribute * A_diag_data[jj1];
                           }
                        }
                     }
                     /* Off-Diagonal block part of row i1 */
                     if (num_procs > 1)
                     {
                        for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                           {
                              if (P_marker_offd[i2] >= jj_begin_row_offd
                                  && (sgn*A_offd_data[jj1]) < 0)
                              {
                                 P_offd_data[P_marker_offd[i2]]
                                    += distribute * A_offd_data[jj1];
                              }
                           }
                        }
                     }
                  }
                  else /* sum = 0 - only add to diag if the same function type */
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func[i1])
                        diagonal += A_diag_data[jj];
                  }
               }
               /*--------------------------------------------------------------
                * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                * into the diagonal. (only if the same function type)
                *--------------------------------------------------------------*/
               else if (CF_marker[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func[i1])
                     diagonal += A_diag_data[jj];
               }
            }
            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/
                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  }
                  /*------------------------------------------------------------
                   * Case 2: neighbor i1 is an F-point and strongly influences i,
                   * distribute a_{i,i1} to C-points that strongly influence i.
                   * Note: currently no distribution to the diagonal in this case.
                   * AGAIN, we only want to distribute to points of the SAME
                   * function type.
                   *-----------------------------------------------------------*/
                  else if (P_marker_offd[i1] == strong_f_marker)
                  {
                     sum = zero;
                     /*---------------------------------------------------------
                      * Loop over row of A_ext for point i1 and calculate the sum
                      * of the connections to c-points that strongly influence i.
                      *---------------------------------------------------------*/
                     /* find row number */
                     c_num = A_offd_j[jj];
                     sgn = 1;
                     if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
                     /* A_ext_j entries >= 0 index our diag block; negative
                        entries encode off-diag columns as -kc-1. */
                     for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                     {
                        i2 = (HYPRE_Int)A_ext_j[jj1];
                        if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                        {
                           if (i2 > -1)
                           {
                              /* in the diagonal block */
                              if (P_marker[i2] >= jj_begin_row
                                  && (sgn*A_ext_data[jj1]) < 0)
                              {
                                 sum += A_ext_data[jj1];
                              }
                           }
                           else
                           {
                              /* in the off_diagonal block  */
                              if (P_marker_offd[-i2-1] >= jj_begin_row_offd
                                  && (sgn*A_ext_data[jj1]) < 0)
                              {
                                 sum += A_ext_data[jj1];
                              }
                           }
                        }
                     }
                     if (sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;
                        /*---------------------------------------------------------
                         * Loop over row of A_ext for point i1 and do
                         * the distribution.
                         *--------------------------------------------------------*/
                        /* Diagonal block part of row i1 */
                        for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                        {
                           i2 = (HYPRE_Int)A_ext_j[jj1];
                           if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                           {
                              if (i2 > -1) /* in the diagonal block */
                              {
                                 if (P_marker[i2] >= jj_begin_row
                                     && (sgn*A_ext_data[jj1]) < 0)
                                 {
                                    P_diag_data[P_marker[i2]]
                                       += distribute * A_ext_data[jj1];
                                 }
                              }
                              else
                              {
                                 /* in the off_diagonal block  */
                                 if (P_marker_offd[-i2-1] >= jj_begin_row_offd
                                     && (sgn*A_ext_data[jj1]) < 0)
                                    P_offd_data[P_marker_offd[-i2-1]]
                                       += distribute * A_ext_data[jj1];
                              }
                           }
                        }
                     }
                     else /* sum = 0 */
                     {
                        if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                           diagonal += A_offd_data[jj];
                     }
                  }
                  /*-----------------------------------------------------------
                   * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                   * into the diagonal.
                   *-----------------------------------------------------------*/
                  else if (CF_marker_offd[i1] != -3)
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                        diagonal += A_offd_data[jj];
                  }
               }
            }
            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/
            if (diagonal == 0.0)
            {
               if (print_level)
                  hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] = 0.0;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] = 0.0;
               }
            }
            else
            {
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] /= -diagonal;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] /= -diagonal;
               }
            }
         }
         /* Invalidate this row's strong-F tags before the next row. */
         strong_f_marker--;
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate; refresh all local views. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }
   /* Compress P's off-diag column space: find the off-diag columns actually
      used and renumber P_offd_j to the compressed index set. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;
      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }
   /* Restore the special F-points (-3) to plain F-points (-1). */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
   *P_ptr = P;
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
   if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGTruncandBuild
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGTruncandBuild( hypre_ParCSRMatrix *P,
                              HYPRE_Real          trunc_factor,
                              HYPRE_Int           max_elmts)
{
   /* Truncate interpolation matrix P (drop entries below trunc_factor * row
    * max, and/or keep at most max_elmts per row), then compress the off-diag
    * column map to the columns that survive and rebuild P's CommPkg.
    *
    * Parameters:
    *   P            - interpolation matrix, modified in place
    *   trunc_factor - relative truncation threshold (0.0 disables)
    *   max_elmts    - max nonzeros kept per row (0 disables)
    * Returns hypre_error_flag. */
   hypre_CSRMatrix     *P_offd     = hypre_ParCSRMatrixOffd(P);
   hypre_ParCSRCommPkg *commpkg_P  = hypre_ParCSRMatrixCommPkg(P);
   HYPRE_BigInt *col_map_offd      = hypre_ParCSRMatrixColMapOffd(P);
   HYPRE_Int    *P_offd_i          = hypre_CSRMatrixI(P_offd);
   HYPRE_Int    *P_offd_j          = hypre_CSRMatrixJ(P_offd);
   HYPRE_Int     num_cols_offd     = hypre_CSRMatrixNumCols(P_offd);
   HYPRE_Int     n_fine            = hypre_CSRMatrixNumRows(P_offd);

   /* Fix: initialize to NULL so these are never indeterminate when
    * P_offd_size == 0 (previously only conditionally assigned). */
   HYPRE_BigInt *new_col_map_offd  = NULL;
   HYPRE_Int    *tmp_map_offd      = NULL;
   HYPRE_Int     P_offd_size = 0, new_num_cols_offd;
   HYPRE_Int    *P_marker = NULL;
   HYPRE_Int     i;
   HYPRE_Int     index;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_offd_j = hypre_CSRMatrixJ(P_offd);      /* arrays may have been reallocated */
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_size = P_offd_i[n_fine];
   }

   new_num_cols_offd = 0;
   if (P_offd_size)
   {
      /* mark which off-diag columns are still referenced */
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_offd; i++)
         P_marker[i] = 0;
      for (i = 0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            new_num_cols_offd++;
            P_marker[index] = 1;
         }
      }

      tmp_map_offd     = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST);
      new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_num_cols_offd, HYPRE_MEMORY_HOST);

      /* tmp_map_offd[i] = i-th surviving old off-diag column index */
      index = 0;
      for (i = 0; i < new_num_cols_offd; i++)
      {
         while (P_marker[index] == 0) index++;
         tmp_map_offd[i] = index++;
      }

      /* renumber P_offd_j to the compressed local indices */
      for (i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          new_num_cols_offd);
   }

   /* Fix: build the new global column map directly from tmp_map_offd instead
    * of re-scanning P_marker a second time (identical result, simpler). */
   for (i = 0; i < new_num_cols_offd; i++)
   {
      new_col_map_offd[i] = col_map_offd[tmp_map_offd[i]];
   }

   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);   /* NULL-safe */

   if (new_num_cols_offd)
   {
      hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(col_map_offd, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
      hypre_CSRMatrixNumCols(P_offd)  = new_num_cols_offd;
   }

   /* the sparsity pattern changed, so the communication package is stale */
   if (commpkg_P != NULL) hypre_MatvecCommPkgDestroy(commpkg_P);
   hypre_MatvecCommPkgCreate(P);

   return hypre_error_flag;
}
hypre_ParCSRMatrix *hypre_CreateC( hypre_ParCSRMatrix *A,
                                   HYPRE_Real w)
{
   /* Build C = I - w * D^{-1} A (weighted-Jacobi style matrix) with the same
    * sparsity as A. When w == 0, a per-row weight equal to the row's 1-norm
    * is used instead of a fixed w. The diagonal entry of each row of A is
    * assumed to be stored first in the row. Returns the new matrix; the
    * caller owns it. Row/col starts are borrowed from A (not owned by C). */
   MPI_Comm         comm       = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag     = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i   = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j   = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd     = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i   = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j   = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt    *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_BigInt    *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int        num_rows   = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt     global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);

   hypre_ParCSRMatrix *C;
   hypre_CSRMatrix    *C_diag;
   hypre_CSRMatrix    *C_offd;
   HYPRE_Real *C_diag_data;
   HYPRE_Int  *C_diag_i;
   HYPRE_Int  *C_diag_j;
   HYPRE_Real *C_offd_data;
   HYPRE_Int  *C_offd_i;
   HYPRE_Int  *C_offd_j;
   HYPRE_BigInt *col_map_offd_C;

   HYPRE_Int  row, k, diag_pos;
   HYPRE_Real scale;     /* multiplier applied to off-diagonal entries */
   HYPRE_Real weight;    /* per-row weight when w == 0 */

   /* C is square with the same distribution and pattern as A */
   C = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_rows, row_starts,
                                row_starts, num_cols_offd, A_diag_i[num_rows], A_offd_i[num_rows]);
   hypre_ParCSRMatrixInitialize(C);

   C_diag      = hypre_ParCSRMatrixDiag(C);
   C_offd      = hypre_ParCSRMatrixOffd(C);
   C_diag_i    = hypre_CSRMatrixI(C_diag);
   C_diag_j    = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd_i    = hypre_CSRMatrixI(C_offd);
   C_offd_j    = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);

   col_map_offd_C = hypre_ParCSRMatrixColMapOffd(C);

   /* starts arrays belong to A */
   hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
   hypre_ParCSRMatrixOwnsColStarts(C) = 0;

   for (row = 0; row < num_cols_offd; row++)
      col_map_offd_C[row] = col_map_offd_A[row];

   for (row = 0; row < num_rows; row++)
   {
      diag_pos = A_diag_i[row];            /* diagonal entry leads the row */
      scale = -w / A_diag_data[diag_pos];
      C_diag_data[diag_pos] = 1.0 - w;
      C_diag_j[diag_pos]    = A_diag_j[diag_pos];

      if (w == 0)
      {
         /* weight = 1-norm of row (diag part + offd part) */
         weight = fabs(A_diag_data[diag_pos]);
         for (k = diag_pos + 1; k < A_diag_i[row + 1]; k++)
            weight += fabs(A_diag_data[k]);
         for (k = A_offd_i[row]; k < A_offd_i[row + 1]; k++)
            weight += fabs(A_offd_data[k]);
         scale = -1 / weight;
         C_diag_data[diag_pos] = 1.0 - A_diag_data[diag_pos] / weight;
      }

      C_diag_i[row] = diag_pos;
      C_offd_i[row] = A_offd_i[row];

      /* scale the remaining diag-part entries of the row */
      for (k = diag_pos + 1; k < A_diag_i[row + 1]; k++)
      {
         C_diag_data[k] = A_diag_data[k] * scale;
         C_diag_j[k]    = A_diag_j[k];
      }
      /* scale the offd-part entries of the row */
      for (k = A_offd_i[row]; k < A_offd_i[row + 1]; k++)
      {
         C_offd_data[k] = A_offd_data[k] * scale;
         C_offd_j[k]    = A_offd_j[k];
      }
   }

   C_diag_i[num_rows] = A_diag_i[num_rows];
   C_offd_i[num_rows] = A_offd_i[num_rows];

   return C;
}
/* RL */
/* One-point interpolation: each F-point interpolates (with weight 1.0) from
 * its single most strongly influencing C-point; C-points use identity.
 * Builds *P_ptr from A, the strength matrix S, and CF_marker.
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_BoomerAMGBuildInterpOnePnt( hypre_ParCSRMatrix  *A,
                                  HYPRE_Int           *CF_marker,
                                  hypre_ParCSRMatrix  *S,
                                  HYPRE_BigInt        *num_cpts_global,
                                  HYPRE_Int            num_functions,
                                  HYPRE_Int           *dof_func,
                                  HYPRE_Int            debug_flag,
                                  HYPRE_Int           *col_offd_S_to_A,
                                  hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd      = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i    = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j    = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   hypre_CSRMatrix *S_diag   = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd   = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   /* csr's */
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   /* arrays */
   HYPRE_Real   *P_diag_data;
   HYPRE_Int    *P_diag_i;
   HYPRE_Int    *P_diag_j;
   HYPRE_Real   *P_offd_data;
   HYPRE_Int    *P_offd_i;
   HYPRE_Int    *P_offd_j;
   HYPRE_Int     num_cols_offd_P;
   HYPRE_Int    *tmp_map_offd = NULL;
   HYPRE_BigInt *col_map_offd_P = NULL;
   /* CF marker off-diag part */
   HYPRE_Int    *CF_marker_offd = NULL;
   /* func type off-diag part */
   HYPRE_Int    *dof_func_offd = NULL;
   /* nnz */
   HYPRE_Int     nnz_diag, nnz_offd, cnt_diag, cnt_offd;
   HYPRE_Int    *marker_diag, *marker_offd = NULL;
   /* local size */
   HYPRE_Int     n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* number of C-pts */
   HYPRE_Int     n_cpts = 0;
   /* fine to coarse mapping: diag part and offd part */
   HYPRE_Int    *fine_to_coarse;
   HYPRE_BigInt *fine_to_coarse_offd = NULL;
   HYPRE_BigInt  total_global_cpts, my_first_cpt;
   HYPRE_Int     my_id, num_procs;
   HYPRE_Int     num_sends;
   HYPRE_Int    *int_buf_data = NULL;
   HYPRE_BigInt *big_int_buf_data = NULL;
   //HYPRE_Int col_start = hypre_ParCSRMatrixFirstRowIndex(A);
   //HYPRE_Int col_end = col_start + n_fine;
   HYPRE_Int     i, j, i1, j1, k1, index, start;
   /* per-row: local index of chosen C-pt, and whether it is diag ('d'),
    * offd ('o'), or none found ('n') */
   HYPRE_Int    *max_abs_cij;
   char         *max_abs_diag_offd;
   HYPRE_Real    max_abs_aij, vv;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   my_first_cpt = num_cpts_global[0];
   /* last rank knows the global number of C-points; broadcast it */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   /* CF marker for the off-diag columns */
   if (num_cols_A_offd)
   {
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   }
   /* function type indicator for the off-diag columns */
   if (num_functions > 1 && num_cols_A_offd)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   }
   /* if CommPkg of A is not present, create it */
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   /* number of sends to do (number of procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* send buffer, of size send_map_starts[num_sends]),
    * i.e., number of entries to send */
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);
   /* copy CF markers of elements to send to buffer
    * RL: why copy them with two for loops? Why not just loop through all in one */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      /* start pos of elements sent to send_proc[i] */
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      /* loop through all elems to send_proc[i] */
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         /* CF marker of send_map_elemts[j] */
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
   }
   /* create a handle to start communication. 11: for integer */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd);
   /* destroy the handle to finish communication */
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /* do a similar communication for dof_func */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   hypre_TFree(int_buf_data,HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping,
    * and find the most strongly influencing C-pt for each F-pt
    *-----------------------------------------------------------------------*/
   /* nnz in diag and offd parts */
   cnt_diag = 0;
   cnt_offd = 0;
   max_abs_cij       = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   max_abs_diag_offd = hypre_CTAlloc(char, n_fine,HYPRE_MEMORY_HOST);
   fine_to_coarse    = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   /* markers initialized as zeros */
   marker_diag = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);

   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a C-point, interpolation is the identity. Also set up
       * mapping vector.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         //fine_to_coarse[i] = my_first_cpt + n_cpts;
         fine_to_coarse[i] = n_cpts;
         n_cpts++;
         continue;
      }

      /* mark all the strong connections: in S */
      /* MARK = i+1 so the marker arrays never need resetting between rows */
      HYPRE_Int MARK = i + 1;
      /* loop through row i of S, diag part  */
      for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
      {
         marker_diag[S_diag_j[j]] = MARK;
      }
      /* loop through row i of S, offd part  */
      if (num_procs > 1)
      {
         for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
         {
            /* optional S-to-A column remap */
            j1 = col_offd_S_to_A ? col_offd_S_to_A[S_offd_j[j]] : S_offd_j[j];
            marker_offd[j1] = MARK;
         }
      }

      fine_to_coarse[i] = -1;
      /*---------------------------------------------------------------------------
       * If i is an F-pt, interpolation is from the most strongly influencing C-pt
       * Find this C-pt and save it
       *--------------------------------------------------------------------------*/
      /* if we failed to find any strong C-pt, mark this point as an 'n' */
      char marker = 'n';
      /* max abs val */
      max_abs_aij = -1.0;
      /* loop through row i of A, diag part  */
      for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
      {
         i1 = A_diag_j[j];
         vv = fabs(A_diag_data[j]);
#if 0
         /* !!! this is a hack just for code verification purpose !!!
            it basically says:
            1. if we see |a_ij| < 1e-14, force it to be 1e-14
            2. if we see |a_ij| == the max(|a_ij|) so far exactly,
               replace it if the j idx is smaller
            Reasons:
            1. numerical round-off for eps-level values
            2. entries in CSR rows may be listed in different orders
          */
         vv = vv < 1e-14 ? 1e-14 : vv;
         if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK &&
             vv == max_abs_aij && i1 < max_abs_cij[i])
         {
            /* mark it as a 'd' */
            marker         = 'd';
            max_abs_cij[i] = i1;
            max_abs_aij    = vv;
            continue;
         }
#endif
         /* it is a strong C-pt and has abs val larger than what have seen */
         if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK && vv > max_abs_aij)
         {
            /* mark it as a 'd' */
            marker         = 'd';
            max_abs_cij[i] = i1;
            max_abs_aij    = vv;
         }
      }
      /* offd part */
      if (num_procs > 1)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            i1 = A_offd_j[j];
            vv = fabs(A_offd_data[j]);
            if (CF_marker_offd[i1] >= 0 && marker_offd[i1] == MARK && vv > max_abs_aij)
            {
               /* mark it as an 'o' */
               marker         = 'o';
               max_abs_cij[i] = i1;
               max_abs_aij    = vv;
            }
         }
      }

      max_abs_diag_offd[i] = marker;

      if (marker == 'd')
      {
         cnt_diag ++;
      }
      else if (marker == 'o')
      {
         cnt_offd ++;
      }
   }

   /* each C-point contributes one identity entry to the diag part */
   nnz_diag = cnt_diag + n_cpts;
   nnz_offd = cnt_offd;

   /*------------- allocate arrays */
   P_diag_i    = hypre_CTAlloc(HYPRE_Int,  n_fine+1,HYPRE_MEMORY_HOST);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int,  nnz_diag,HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, nnz_diag,HYPRE_MEMORY_HOST);

   /* not in ``if num_procs > 1'',
    * allocation needed even for empty CSR */
   P_offd_i    = hypre_CTAlloc(HYPRE_Int,  n_fine+1,HYPRE_MEMORY_HOST);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int,  nnz_offd,HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, nnz_offd,HYPRE_MEMORY_HOST);

   /* redundant */
   P_diag_i[0] = 0;
   P_offd_i[0] = 0;

   /* reset counters */
   cnt_diag = 0;
   cnt_offd = 0;

   /*-----------------------------------------------------------------------
    *  Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd,HYPRE_MEMORY_HOST);
   big_int_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         /* shift local coarse indices to global before sending */
         big_int_buf_data[index++] = my_first_cpt
            +(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
   }
   /* 21: HYPRE_BigInt communication */
   comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_int_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /*-----------------------------------------------------------------------
    * Second Pass: Populate P
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] >= 0)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         //P_diag_j[cnt_diag] = fine_to_coarse[i] - my_first_cpt;
         P_diag_j[cnt_diag] = fine_to_coarse[i];
         P_diag_data[cnt_diag++] = 1.0;
      }
      else
      {
         /*---------------------------------------------------------------------------
          * If i is an F-pt, interpolation is from the most strongly influencing C-pt
          *--------------------------------------------------------------------------*/
         if (max_abs_diag_offd[i] == 'd')
         {
            /* on diag part of P */
            j = max_abs_cij[i];
            //P_diag_j[cnt_diag] = fine_to_coarse[j] - my_first_cpt;
            P_diag_j[cnt_diag] = fine_to_coarse[j];
            P_diag_data[cnt_diag++] = 1.0;
         }
         else if (max_abs_diag_offd[i] == 'o')
         {
            /* on offd part of P; j is still an A-offd index here,
             * remapped to P-offd local indices below */
            j = max_abs_cij[i];
            P_offd_j[cnt_offd] = j;
            P_offd_data[cnt_offd++] = 1.0;
         }
      }

      P_diag_i[i+1] = cnt_diag;
      P_offd_i[i+1] = cnt_offd;
   }

   hypre_assert(cnt_diag == nnz_diag);
   hypre_assert(cnt_offd == nnz_offd);

   /* num of cols in the offd part of P */
   num_cols_offd_P = 0;
   /* marker_offd: all -1 */
   for (i = 0; i < num_cols_A_offd; i++)
   {
      marker_offd[i] = -1;
   }
   for (i = 0; i < nnz_offd; i++)
   {
      i1 = P_offd_j[i];
      if (marker_offd[i1] == -1)
      {
         num_cols_offd_P++;
         marker_offd[i1] = 1;
      }
   }

   /* col_map_offd_P: the col indices of the offd of P
    * we first keep them be the offd-idx of A */
   col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P,HYPRE_MEMORY_HOST);
   tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P,HYPRE_MEMORY_HOST);
   for (i = 0, i1 = 0; i < num_cols_A_offd; i++)
   {
      if (marker_offd[i] == 1)
      {
         tmp_map_offd[i1++] = i;
      }
   }
   hypre_assert(i1 == num_cols_offd_P);

   /* now, adjust P_offd_j to local idx w.r.t col_map_offd_R
    * by searching */
   for (i = 0; i < nnz_offd; i++)
   {
      i1 = P_offd_j[i];
      k1 = hypre_BinarySearch(tmp_map_offd, i1, num_cols_offd_P);
      /* search must succeed */
      hypre_assert(k1 >= 0 && k1 < num_cols_offd_P);
      P_offd_j[i] = k1;
   }

   /* change col_map_offd_P to global coarse ids */
   for (i = 0; i < num_cols_offd_P; i++)
   {
      col_map_offd_P[i] = fine_to_coarse_offd[tmp_map_offd[i]];
   }

   /* Now, we should have everything of Parcsr matrix P */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumCols(A), /* global num of rows */
                                total_global_cpts,                  /* global num of cols */
                                hypre_ParCSRMatrixColStarts(A),     /* row_starts */
                                num_cpts_global,                    /* col_starts */
                                num_cols_offd_P,                    /* num cols offd */
                                nnz_diag,
                                nnz_offd);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag)    = P_diag_i;
   hypre_CSRMatrixJ(P_diag)    = P_diag_j;

   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd)    = P_offd_i;
   hypre_CSRMatrixJ(P_offd)    = P_offd_j;

   /* P does not own ColStarts, since A does */
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;

   /* create CommPkg of P */
   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   /* free workspace */
   hypre_TFree(CF_marker_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(big_int_buf_data,HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse,HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(marker_diag,HYPRE_MEMORY_HOST);
   hypre_TFree(marker_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(max_abs_cij,HYPRE_MEMORY_HOST);
   hypre_TFree(max_abs_diag_offd,HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
|
cvsAdvDiff_bnd_omp.c | /* -----------------------------------------------------------------
* Programmer(s): Daniel Reynolds and Ting Yan @ SMU
* Based on cvsAdvDiff_bnd.c and parallelized with OpenMP
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2019, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example problem:
*
* The following is a simple example problem with a banded Jacobian,
* solved using CVODES.
* The problem is the semi-discrete form of the advection-diffusion
* equation in 2-D:
* du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2
* on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time
* interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions
* are posed, and the initial condition is
* u(x,y,t=0) = x(2-x)y(1-y)exp(5xy).
* The PDE is discretized on a uniform MX+2 by MY+2 grid with
* central differencing, and with boundary values eliminated,
* leaving an ODE system of size NEQ = MX*MY.
* This program solves the problem with the BDF method, Newton
* iteration with the BAND linear solver, and a user-supplied
* Jacobian routine.
* It uses scalar relative and absolute tolerances.
* Output is printed at t = .1, .2, ..., 1.
* Run statistics (optional outputs) are printed at the end.
*
* Optionally, we can set the number of threads from environment
* variable or command line. To check the current value for number
* of threads from environment:
* % echo $OMP_NUM_THREADS
*
* Execution:
*
* To use the default value or the number of threads from the
* environment value, run without arguments:
* % ./cvsAdvDiff_bnd_omp
* The environment variable can be over-ridden with a command line
* argument specifying the number of threads to use, e.g:
* % ./cvsAdvDiff_bnd_omp 5
* ----------------------------------------------------------------- */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Header files with a description of contents */
#include <cvodes/cvodes.h> /* prototypes for CVODE fcts., consts. */
#include <nvector/nvector_openmp.h>  /* OpenMP N_Vector types, fcts., macros */
#include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */
#include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */
#include <sundials/sundials_types.h> /* definition of type realtype */
#include <sundials/sundials_math.h> /* definition of ABS and EXP */
#ifdef _OPENMP
#include <omp.h>
#endif
/* Problem Constants */

#define XMAX  RCONST(2.0)    /* domain boundaries         */
#define YMAX  RCONST(1.0)
#define MX    10             /* mesh dimensions           */
#define MY    5
/* Fix: parenthesize so NEQ expands safely inside larger expressions
 * (e.g. NEQ*2 or 100/NEQ would otherwise mis-associate). */
#define NEQ   (MX*MY)        /* number of equations       */
#define ATOL  RCONST(1.0e-5) /* scalar absolute tolerance */
#define T0    RCONST(0.0)    /* initial time              */
#define T1    RCONST(0.1)    /* first output time         */
#define DTOUT RCONST(0.1)    /* output time increment     */
#define NOUT  10             /* number of output times    */

#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE  RCONST(1.0)
#define TWO  RCONST(2.0)
#define FIVE RCONST(5.0)

/* User-defined vector access macro IJth */

/* IJth is defined in order to isolate the translation from the
   mathematical 2-dimensional structure of the dependent variable vector
   to the underlying 1-dimensional storage.
   IJth(vdata,i,j) references the element in the vdata array for
   u at mesh point (i,j), where 1 <= i <= MX, 1 <= j <= MY.
   The vdata array is obtained via the macro call vdata = NV_DATA_S(v),
   where v is an N_Vector.
   The variables are ordered by the y index j, then by the x index i. */

#define IJth(vdata,i,j) (vdata[(j-1) + (i-1)*MY])

/* Type : UserData (contains grid constants) */

typedef struct {
  realtype dx, dy;              /* mesh spacings                         */
  realtype hdcoef;              /* horizontal diffusion coeff: 1/dx^2    */
  realtype hacoef;              /* horizontal advection coeff: 0.5/(2dx) */
  realtype vdcoef;              /* vertical diffusion coeff:   1/dy^2    */
  int nthreads;                 /* number of OpenMP threads to use       */
} *UserData;

/* Private Helper Functions */

static void SetIC(N_Vector u, UserData data);
static void PrintHeader(realtype reltol, realtype abstol, realtype umax);
static void PrintOutput(realtype t, realtype umax, long int nst);
static void PrintFinalStats(void *cvode_mem);

/* Private function to check function return values */

static int check_retval(void *returnvalue, char *funcname, int opt);

/* Functions Called by the Solver */

static int f(realtype t, N_Vector u, N_Vector udot, void *user_data);

static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J,
               void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);
/* Driver: set up the advection-diffusion problem on the MX x MY grid,
 * integrate with CVODE BDF + Newton + banded linear solver, and print
 * max-norm of the solution at NOUT output times. Returns 0 on success,
 * 1 on any setup failure. */
int main(int argc, char *argv[])
{
  realtype dx, dy, reltol, abstol, t, tout, umax;
  N_Vector u;
  UserData data;
  SUNMatrix A;
  SUNLinearSolver LS;
  void *cvode_mem;
  int iout, retval;
  long int nst;
  int num_threads;

  u = NULL;
  data = NULL;
  A = NULL;
  LS = NULL;
  cvode_mem = NULL;

  /* Set the number of threads to use */
  num_threads = 1;     /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();  /* Overwrite with OMP_NUM_THREADS environment variable */
#endif
  if (argc > 1)        /* overwrite with command line value, if supplied */
    num_threads = strtol(argv[1], NULL, 0);

  /* Create an OpenMP vector */
  u = N_VNew_OpenMP(NEQ, num_threads);  /* Allocate u vector */
  if(check_retval((void*)u, "N_VNew_OpenMP", 0)) return(1);

  reltol = ZERO;  /* Set the tolerances */
  abstol = ATOL;

  data = (UserData) malloc(sizeof *data);  /* Allocate data memory */
  if(check_retval((void *)data, "malloc", 2)) return(1);
  /* grid spacings use MX+2/MY+2 points incl. eliminated boundaries */
  dx = data->dx = XMAX/(MX+1);  /* Set grid coefficients in data */
  dy = data->dy = YMAX/(MY+1);
  data->hdcoef = ONE/(dx*dx);
  data->hacoef = HALF/(TWO*dx);
  data->vdcoef = ONE/(dy*dy);
  data->nthreads = num_threads;

  SetIC(u, data);  /* Initialize u vector */

  /* Call CVodeCreate to create the solver memory and specify the
   * Backward Differentiation Formula */
  cvode_mem = CVodeCreate(CV_BDF);
  if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1);

  /* Call CVodeInit to initialize the integrator memory and specify the
   * user's right hand side function in u'=f(t,u), the inital time T0, and
   * the initial dependent variable vector u. */
  retval = CVodeInit(cvode_mem, f, T0, u);
  if(check_retval(&retval, "CVodeInit", 1)) return(1);

  /* Call CVodeSStolerances to specify the scalar relative tolerance
   * and scalar absolute tolerance */
  retval = CVodeSStolerances(cvode_mem, reltol, abstol);
  if (check_retval(&retval, "CVodeSStolerances", 1)) return(1);

  /* Set the pointer to user-defined data */
  retval = CVodeSetUserData(cvode_mem, data);
  if(check_retval(&retval, "CVodeSetUserData", 1)) return(1);

  /* Create banded SUNMatrix for use in linear solves -- since this will be factored,
     set the storage bandwidth to be the sum of upper and lower bandwidths */
  A = SUNBandMatrix(NEQ, MY, MY);
  if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1);

  /* Create banded SUNLinearSolver object for use by CVode */
  LS = SUNLinSol_Band(u, A);
  if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1);

  /* Call CVodeSetLinearSolver to attach the matrix and linear solver to CVode */
  retval = CVodeSetLinearSolver(cvode_mem, LS, A);
  if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1);

  /* Set the user-supplied Jacobian routine Jac */
  retval = CVodeSetJacFn(cvode_mem, Jac);
  if(check_retval(&retval, "CVodeSetJacFn", 1)) return(1);

  /* In loop over output points: call CVode, print results, test for errors */
  umax = N_VMaxNorm(u);
  PrintHeader(reltol, abstol, umax);
  for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) {
    retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL);
    if(check_retval(&retval, "CVode", 1)) break;
    umax = N_VMaxNorm(u);
    retval = CVodeGetNumSteps(cvode_mem, &nst);
    check_retval(&retval, "CVodeGetNumSteps", 1);
    PrintOutput(t, umax, nst);
  }

  PrintFinalStats(cvode_mem);  /* Print some final statistics   */
  printf("num_threads = %i\n\n", num_threads);

  N_VDestroy_OpenMP(u);   /* Free the u vector          */
  CVodeFree(&cvode_mem);  /* Free the integrator memory */
  SUNLinSolFree(LS);      /* Free the linear solver memory */
  SUNMatDestroy(A);       /* Free the matrix memory */
  free(data);             /* Free the user data */

  return(0);
}
/*
*-------------------------------
* Functions called by the solver
*-------------------------------
*/
/* f routine. Compute f(t,u). */
/* ODE right-hand side: udot = f(t,u), the central-difference discretization
 * of u_t = u_xx + .5 u_x + u_yy with homogeneous Dirichlet boundaries.
 * The grid loop is parallelized over rows with OpenMP. Always returns 0. */
static int f(realtype t, N_Vector u,N_Vector udot, void *user_data)
{
  realtype uc, ud, uu, ul, ur, cdx, cax, cdy, dh, ah, dv;
  realtype *uvec, *dvec;
  int i, j;
  UserData data;

  uvec = NV_DATA_OMP(u);
  dvec = NV_DATA_OMP(udot);

  /* Pull the PDE coefficients out of the user data block */
  data = (UserData) user_data;
  cdx = data->hdcoef;   /* 1/dx^2          */
  cax = data->hacoef;   /* advection coeff */
  cdy = data->vdcoef;   /* 1/dy^2          */

  /* Sweep every interior mesh point; boundary neighbors are zero */
#pragma omp parallel for default(shared) private(j, i, uc, ud, uu, ul, ur, dh, ah, dv) num_threads(data->nthreads)
  for (j=1; j <= MY; j++) {
    for (i=1; i <= MX; i++) {

      /* Center value and its four neighbors (ZERO off the grid edge) */
      uc = IJth(uvec, i, j);
      ud = (j == 1)  ? ZERO : IJth(uvec, i, j-1);
      uu = (j == MY) ? ZERO : IJth(uvec, i, j+1);
      ul = (i == 1)  ? ZERO : IJth(uvec, i-1, j);
      ur = (i == MX) ? ZERO : IJth(uvec, i+1, j);

      /* Second differences in x and y, first difference in x */
      dh = cdx*(ul - TWO*uc + ur);
      ah = cax*(ur - ul);
      dv = cdy*(uu - TWO*uc + ud);

      IJth(dvec, i, j) = dh + ah + dv;
    }
  }

  return(0);
}
/* Jacobian routine. Compute J(t,u). */
/* Jacobian routine. Compute J(t,u).
 * Fills the banded SUNMatrix J column by column; J is constant in t and u
 * for this linear problem. Unused args (t, u, fu, tmp1-3) are required by
 * the CVLsJacFn signature. Always returns 0. */
static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J,
               void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
  sunindextype i, j, k;
  realtype *kthCol, hordc, horac, verdc;
  UserData data;

  /*
    The components of f = udot that depend on u(i,j) are
    f(i,j), f(i-1,j), f(i+1,j), f(i,j-1), f(i,j+1), with
      df(i,j)/du(i,j) = -2 (1/dx^2 + 1/dy^2)
      df(i-1,j)/du(i,j) = 1/dx^2 + .25/dx  (if i > 1)
      df(i+1,j)/du(i,j) = 1/dx^2 - .25/dx  (if i < MX)
      df(i,j-1)/du(i,j) = 1/dy^2           (if j > 1)
      df(i,j+1)/du(i,j) = 1/dy^2           (if j < MY)
  */

  data = (UserData) user_data;
  hordc = data->hdcoef;
  horac = data->hacoef;
  verdc = data->vdcoef;

  /* each (i,j) writes only its own column k, so iterations are independent */
#pragma omp parallel for collapse(2) default(shared) private(i, j, k, kthCol) num_threads(data->nthreads)
  for (j=1; j <= MY; j++) {
    for (i=1; i <= MX; i++) {
      /* k = column index of unknown u(i,j); matches IJth ordering */
      k = j-1 + (i-1)*MY;
      kthCol = SUNBandMatrix_Column(J,k);

      /* set the kth column of J */

      SM_COLUMN_ELEMENT_B(kthCol,k,k) = -TWO*(verdc+hordc);
      if (i != 1)  SM_COLUMN_ELEMENT_B(kthCol,k-MY,k) = hordc + horac;
      if (i != MX) SM_COLUMN_ELEMENT_B(kthCol,k+MY,k) = hordc - horac;
      if (j != 1)  SM_COLUMN_ELEMENT_B(kthCol,k-1,k)  = verdc;
      if (j != MY) SM_COLUMN_ELEMENT_B(kthCol,k+1,k)  = verdc;
    }
  }

  return(0);
}
/*
*-------------------------------
* Private helper functions
*-------------------------------
*/
/* Set initial conditions in u vector */
/* Load the initial profile u(x,y,0) = x(2-x)y(1-y)exp(5xy) into u.
 * Boundary values are excluded (grid covers interior points only). */
static void SetIC(N_Vector u, UserData data)
{
  realtype *uvec;
  realtype xstep, ystep, xc, yc;
  int ix, jy;

  /* Mesh spacings from the user data block */
  xstep = data->dx;
  ystep = data->dy;

  /* Raw data array underlying the OpenMP vector */
  uvec = NV_DATA_OMP(u);

  /* Fill every interior grid point with the initial condition */
#pragma omp parallel for default(shared) private(jy, ix, yc, xc)
  for (jy=1; jy <= MY; jy++) {
    yc = jy*ystep;
    for (ix=1; ix <= MX; ix++) {
      xc = ix*xstep;
      IJth(uvec,ix,jy) = xc*(XMAX - xc)*yc*(YMAX - yc)*SUNRexp(FIVE*xc*yc);
    }
  }
}
/* Print first lines of output (problem description) */
/* Print first lines of output (problem description).
 * Format specifiers are selected per the configured realtype precision. */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax)
{
  printf("\n2-D Advection-Diffusion Equation\n");
  printf("Mesh dimensions = %d X %d\n", MX, MY);
  printf("Total system size = %d\n", NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters: reltol = %Lg   abstol = %Lg\n\n",
         reltol, abstol);
  printf("At t = %Lg      max.norm(u) =%14.6Le \n", T0, umax);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters: reltol = %g   abstol = %g\n\n",
         reltol, abstol);
  printf("At t = %g      max.norm(u) =%14.6e \n", T0, umax);
#else
  printf("Tolerance parameters: reltol = %g   abstol = %g\n\n", reltol, abstol);
  printf("At t = %g      max.norm(u) =%14.6e \n", T0, umax);
#endif

  return;
}
/* Print current value */
/* One progress line per output time: t, max norm of the solution, and the
   cumulative step count; conversions vary with the realtype precision. */
static void PrintOutput(realtype t, realtype umax, long int nst)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#else
printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#endif
return;
}
/* Get and print some final statistics */
/* Queries the CVODE integrator / linear-solver counters in the original
   order, then prints them in the original two-line layout. */
static void PrintFinalStats(void *cvode_mem)
{
  long int nst, nfe, nsetups, netf, nni, ncfn, nje, nfeLS;
  int retval, k;

  /* Every CVODE optional-output getter used here shares the signature
     int f(void *, long int *), so the queries can be table-driven. */
  struct {
    int (*get)(void *, long int *);
    const char *name;
    long int *out;
  } queries[] = {
    { CVodeGetNumSteps,               "CVodeGetNumSteps",               &nst     },
    { CVodeGetNumRhsEvals,            "CVodeGetNumRhsEvals",            &nfe     },
    { CVodeGetNumLinSolvSetups,       "CVodeGetNumLinSolvSetups",       &nsetups },
    { CVodeGetNumErrTestFails,        "CVodeGetNumErrTestFails",        &netf    },
    { CVodeGetNumNonlinSolvIters,     "CVodeGetNumNonlinSolvIters",     &nni     },
    { CVodeGetNumNonlinSolvConvFails, "CVodeGetNumNonlinSolvConvFails", &ncfn    },
    { CVodeGetNumJacEvals,            "CVodeGetNumJacEvals",            &nje     },
    { CVodeGetNumLinRhsEvals,         "CVodeGetNumLinRhsEvals",         &nfeLS   }
  };

  for (k = 0; k < (int)(sizeof(queries)/sizeof(queries[0])); k++) {
    retval = queries[k].get(cvode_mem, queries[k].out);
    check_retval(&retval, (char *) queries[k].name, 1);
  }

  printf("\nFinal Statistics:\n");
  printf("nst = %-6ld nfe = %-6ld nsetups = %-6ld nfeLS = %-6ld nje = %ld\n",
         nst, nfe, nsetups, nfeLS, nje);
  printf("nni = %-6ld ncfn = %-6ld netf = %ld\n",
         nni, ncfn, netf);
  return;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns an integer value so check if
retval < 0
opt == 2 means function allocates memory so check if returned
NULL pointer */
static int check_retval(void *returnvalue, char *funcname, int opt)
{
  int errcode;

  switch (opt) {
  case 0:
    /* SUNDIALS allocator: NULL means out of memory */
    if (returnvalue == NULL) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname);
      return(1);
    }
    break;
  case 1:
    /* SUNDIALS call returning an int status: negative means failure */
    errcode = *(int *) returnvalue;
    if (errcode < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, errcode);
      return(1);
    }
    break;
  case 2:
    /* Plain allocator: NULL means out of memory */
    if (returnvalue == NULL) {
      fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname);
      return(1);
    }
    break;
  }
  /* Any other opt value, or a successful check, reports success */
  return(0);
}
|
GB_binop__times_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__times_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__times_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__times_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_uint16)
// A*D function (colscale): GB (_AxD__times_uint16)
// D*A function (rowscale): GB (_DxB__times_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__times_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__times_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_uint16)
// C=scalar+B GB (_bind1st__times_uint16)
// C=scalar+B' GB (_bind1st_tran__times_uint16)
// C=A+scalar GB (_bind2nd__times_uint16)
// C=A'+scalar GB (_bind2nd_tran__times_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_UINT16 || GxB_NO_TIMES_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Generated kernel: the loops live in the #include'd template file,
// specialized for the uint16_t TIMES operator by the GB_* macros above.
void GB (_Cdense_ewise3_accum__times_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Same template mechanism, without the accumulate step (C is overwritten).
void GB (_Cdense_ewise3_noaccum__times_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// When GB_DISABLE is set at compile time, returning GrB_NO_VALUE tells the
// caller to fall back to the generic (non-specialized) kernel.
GrB_Info GB (_Cdense_accumB__times_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__times_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable (the braced block above already returned); kept as
// emitted by the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Scales each column j of A by the diagonal entry D(j,j); the loop body is
// supplied by the #include'd template, using the GB_* macros above.
GrB_Info GB (_AxD__times_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Scales each row i of B by the diagonal entry D(i,i).
GrB_Info GB (_DxB__times_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Set-union element-wise operation; alpha/beta scalars are only read when
// the caller requests the eWiseUnion variant.
GrB_Info GB (_AaddB__times_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slicing declarations consumed (and freed) by the template
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Set-intersection element-wise multiply (method 08); loop bodies come from
// the #include'd meta template.
GrB_Info GB (_AemultB_08__times_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__times_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for TIMES (commutative), so only the unflipped
// template below is compiled for this operator.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Method 04: work is driven by the sparse mask M rather than by A or B.
GrB_Info GB (_AemultB_04__times_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = x * Bx [p] for every present entry p (Bb is the bitmap
// of present entries; NULL means all present).
GrB_Info GB (_bind1st__times_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x * bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Mirror of bind1st: Cx [p] = Ax [p] * y for every present entry p.
GrB_Info GB (_bind2nd__times_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij * y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x * aij) ; \
}
GrB_Info GB (_bind1st_tran__times_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (identical here, since the
// A and B types of this operator are the same).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij * y) ; \
}
GrB_Info GB (_bind2nd_tran__times_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
summarystatsreduce.h | /*
* summarystatsreduce.h
*
* Created on: Jan 19, 2016
* Author: agibsonccc
*/
#ifndef SUMMARYSTATSREDUCE_H_
#define SUMMARYSTATSREDUCE_H_
#include <templatemath.h>
#include <dll.h>
#include <shape.h>
#include <op.h>
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#endif
#ifdef __CUDACC__
#include <helper_cuda.h>
#endif
#ifdef __JNI__
#include <jni.h>
#endif
namespace functions {
namespace summarystats {
// This example computes several statistical properties of a data
// series in a single reduction. The algorithm is described in detail here:
// http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
//
// Thanks to Joseph Rhoads for contributing this example
// structure used to accumulate the moments and other
// statistical properties encountered so far.
template <typename T>
class SummaryStatsData {
public:
    T n;     // number of samples folded in so far
    T min;   // smallest sample seen
    T max;   // largest sample seen
    T mean;  // running mean
    T M2;    // sum of squared deviations from the mean
    T M3;    // sum of cubed deviations (for skewness)
    T M4;    // sum of fourth-power deviations (for kurtosis)
    T bias;

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    SummaryStatsData() {
        initialize();
    }

    // initialize to the identity element
    // NOTE(review): min and max are NOT reset here. Merging code guards on
    // n == 0 and returns the other operand, so it never reads them, but
    // reading min/max of a freshly initialize()d object directly is
    // undefined -- confirm callers before relying on it.
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    void initialize() {
        n = mean = M2 = M3 = M4 = bias = 0;
    }

    // Seed the accumulator with a single observation.
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    void initWithValue(T val) {
        n = 1;
        min = val;
        max = val;
        mean = val;
        M2 = 0;
        M3 = 0;
        M4 = 0;
        bias = 0;
    }

    // Copy every field of *target into this accumulator.
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    void setValues(SummaryStatsData<T> *target) {
        n = target->n;
        min = target->min;
        max = target->max;
        mean = target->mean;
        M2 = target->M2;
        M3 = target->M3;
        M4 = target->M4;
        bias = target->bias;
    }

    // Population variance M2 / n; 0 when fewer than two samples.
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T variance() {
        if (n <= 1)
            return 0.0;
        return M2 / (n);
    }

    // Bias-corrected variance estimate.
    // NOTE(review): the skewness()^2 / n correction term is unusual -- the
    // textbook unbiased variance is simply M2 / (n - 1). Confirm against
    // callers before changing the formula.
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T varianceBiasCorrected() {
        if (this->n <= 1) {
            return 0.0;
        }
        return (M2 - nd4j::math::nd4j_pow<T>(skewness(), 2.0) / n) / (n - 1.0);
    }

    // Identical to variance(); kept for interface compatibility.
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T variance_n() {
        if (n <= 1)
            return 0.0;
        return M2 / n;
    }

    // Sample skewness: sqrt(n) * M3 / M2^1.5.
    // BUGFIX: this previously called nd4j_sqrt<int>(n), truncating the
    // (floating-point) sample count to int and taking the square root at
    // integer precision; compute it in the accumulator type T instead.
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T skewness() { return nd4j::math::nd4j_sqrt<T>(n) * M3 / nd4j::math::nd4j_pow(M2, (T) 1.5); }

    // (Non-excess) kurtosis: n * M4 / M2^2.
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T kurtosis() { return n * M4 / (M2 * M2); }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T getM2() {
        return M2;
    }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    void setM2(T m2) {
        M2 = m2;
    }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T getM3() {
        return M3;
    }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    void setM3(T m3) {
        M3 = m3;
    }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T getM4() {
        return M4;
    }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    void setM4(T m4) {
        M4 = m4;
    }

    // (was "__inline__"; normalized to match the other accessors)
#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T getMax() {
        return max;
    }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    void setMax(T max) {
        this->max = max;
    }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T getMean() {
        return mean;
    }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    void setMean(T mean) {
        this->mean = mean;
    }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T getMin() {
        return min;
    }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    void setMin(T min) {
        this->min = min;
    }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    T getN() {
        return n;
    }

#ifdef __CUDACC__
    inline __host__ __device__
#elif defined(__GNUC__)
#endif
    void setN(T n) {
        this->n = n;
    }
};
#ifdef __CUDACC__
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
template<typename T>
struct SharedSummaryStatsData {
// Ensure that we won't compile any un-specialized types
__device__ T * getPointer() {
extern __device__ void error(void);
error();
return 0;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
// Each specialization aliases the dynamically-sized shared-memory region;
// the extern array name must differ per type to avoid conflicting decls.
template<>
struct SharedSummaryStatsData<float> {
__device__ SummaryStatsData<float> * getPointer() {
extern __shared__ SummaryStatsData<float> s_int2[];
return s_int2;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedSummaryStatsData<double> {
__device__ SummaryStatsData<double> * getPointer() {
extern __shared__ SummaryStatsData<double> s_int6[];
return s_int6;
}
};
#endif
/**
* Standard deviation or variance 1 pass
*/
template<typename T>
class SummaryStatsReduce: public functions::ops::Op<T> {
protected:
bool biasCorrected = true;
public:
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
// Construct the reducer; biasCorrected selects the bias-corrected variance
// variant in subclasses that consult the flag.
SummaryStatsReduce(bool biasCorrected) {
this->biasCorrected = biasCorrected;
}
/**
* Per-element op for the kernel. The base implementation is the identity:
* the already-built per-element summary is passed through unchanged.
* @param val the summary built from one element
* @param extraParams extra op parameters (unused here)
* @return val unchanged
*/
//an op for the kernel
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
SummaryStatsData<T> op(SummaryStatsData<T> val, T *extraParams) {
return val;
}
/**
* Merge two partial summaries into one, equivalent to having processed the
* combined sample set sequentially (pairwise update of count, extrema,
* mean and the 2nd..4th central-moment sums; Chan et al.'s parallel
* variance algorithm).
* @param x left partial summary
* @param y right partial summary
* @param extraParams extra op parameters (unused)
* @return the merged summary
*/
//calculate an update of the reduce operation
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
SummaryStatsData<T> update(SummaryStatsData<T> x, SummaryStatsData<T> y,
T *extraParams) {
// An empty side is the identity element: return the other side unchanged
// (also why initialize() need not set min/max).
if(x.n == 0 && y.n > 0)
return y;
else if(x.n > 0 && y.n == 0)
return x;
SummaryStatsData<T> result;
T n = x.n + y.n;
T n2 = n * n;
T n3 = n2 * n;
// delta is the gap between the two partial means; its powers feed the
// pairwise-merge identities below.
T delta = y.mean - x.mean;
T delta2 = delta * delta;
T delta3 = delta2 * delta;
T delta4 = delta3 * delta;
//Basic number of samples (n), min, and max
result.n = n;
result.min = nd4j::math::nd4j_min(x.min, y.min);
result.max = nd4j::math::nd4j_max(x.max, y.max);
// Combined mean, then M2..M4 via the standard pairwise-merge formulas.
result.mean = x.mean + delta * y.n / n;
result.M2 = x.M2 + y.M2;
result.M2 += delta2 * x.n * y.n / n;
result.M3 = x.M3 + y.M3;
result.M3 += delta3 * x.n * y.n * (x.n - y.n) / n2;
result.M3 += (T) 3.0 * delta * (x.n * y.M2 - y.n * x.M2) / n;
result.M4 = x.M4 + y.M4;
result.M4 += delta4 * x.n * y.n * (x.n * x.n - x.n * y.n + y.n * y.n) / n3;
result.M4 += (T) 6.0 * delta2 * (x.n * x.n * y.M2 + y.n * y.n * x.M2) / n2;
result.M4 += (T) 4.0 * delta * (x.n * y.M3 - y.n * x.M3) / n;
return result;
}
/**
* Combine two partial results produced by separate kernels.
* Pure virtual: subclasses define how partials are merged.
* @param f1 first partial result
* @param f2 second partial result
* @param extraParams extra op parameters
* @return the merged result
*/
//invoked when combining two kernels
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
SummaryStatsData<T> merge(SummaryStatsData<T> f1, SummaryStatsData<T> f2, T *extraParams) = 0;
// Extract the final scalar statistic (e.g. variance or stddev) from the
// accumulated summary; defined by each concrete reduction.
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
T getValue(SummaryStatsData<T> val) = 0;
/**
* Post process the accumulated reduction (for things like means etc.).
* Pure virtual: defined by each concrete reduction.
* @param reduction the accumulated summary
* @param n number of elements reduced
* @param xOffset offset into the input buffer
* @param dx the input buffer
* @param incx the input stride
* @param extraParams extra op parameters
* @param result the output buffer
* @return the post-processed summary
*/
//post process result (for things like means etc)
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
SummaryStatsData<T> postProcess(SummaryStatsData<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) = 0;
/**
* Pairwise op on two per-element summaries.
* Pure virtual: defined by each concrete reduction.
* @param d1 first summary
* @param d2 second summary
* @param extraParams extra op parameters
* @return the combined summary
*/
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
SummaryStatsData<T> op(SummaryStatsData<T> d1, SummaryStatsData<T> d2, T *extraParams) = 0;
#ifdef __CUDACC__
/**
* Tree-reduce the per-thread partials held in shared memory down to
* sPartials[0], using update() to merge pairs.
* @param sPartialsRef pointer to the shared-memory partials array
* @param tid this thread's index within the block
* @param numElements number of valid partials to reduce
* @param extraParams extra op parameters
*/
virtual __device__ void aggregatePartials(SummaryStatsData<T> **sPartialsRef,int tid,int numElements,T *extraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
SummaryStatsData<T> *sPartials = *sPartialsRef;
int floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
// clear low set bits until floorPow2 is a power of two
#pragma unroll
while ( floorPow2 & (floorPow2 - 1) ) {
floorPow2 &= floorPow2 - 1;
}
// fold the "remainder" threads above the power-of-two boundary down
if (tid >= floorPow2) {
SummaryStatsData<T> prev = sPartials[tid - floorPow2];
SummaryStatsData<T> curr = sPartials[tid];
sPartials[tid - floorPow2] = update(prev,curr,extraParams);
}
__syncthreads();
}
// standard halving tree reduction over the power-of-two range
#pragma unroll
for (int activeThreads = floorPow2 >> 1;activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
SummaryStatsData<T> curr = sPartials[tid];
SummaryStatsData<T> next = sPartials[tid + activeThreads];
sPartials[tid] = update(curr,next,extraParams);
}
__syncthreads();
}
}
/**
* NOTE(review): this comment predates the current signature; the actual
* parameters are listed on the function below.
* @param n n is the number of
* elements to loop through
* @param dx the data to operate on
* @param xVectorInfo the meta data for the vector:
* 0 is the offset
* 1 is the increment/stride
* 2 is the real length of the buffer (n and dx.length won't always be the same)
* 3 is the element wise stride for the buffer
* 4 is the number of elements it takes to get to the next row/column/tensor
* @param gpuInformation
* 0 is the block size
* 1 is the grid size
* 2 is the shared memory size
* @param problemDefinition
* 0 is the number of elements per vector
* 1 is the number of vectors
*/
__inline__ __device__ void transform(
T *dx,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, T *reductionBuffer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) {
/**
* Gpu information for the problem
*/
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile int resultScalar;
__shared__ int xElementWiseStride;
int numElements = blockDim.x;
//shared memory space for storing intermediate results
// (one SummaryStatsData per thread, carved out of the manager's buffer)
SummaryStatsData<T> *sPartials;
//functions::summarystats::SharedSummaryStatsData<T> holder;
sPartials = (SummaryStatsData<T> *) manager->getSharedReductionBuffer(); //holder.getPointer();
T startingVal = this->startingValue(dx);
// every thread seeds its partial with an empty (n == 0) summary
#pragma unroll
for (int i = threadIdx.x; i < numElements; i += blockDim.x) {
SummaryStatsData<T> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[i] = val;
}
__syncthreads();
//length for the tad
__shared__ volatile int xLength;
__shared__ volatile int resultLength;
SummaryStatsData <T> reduction;
reduction.initWithValue(0.0);
reduction.n = 0;
// thread 0 decides whether this is a full (scalar) or dimensional reduce
if (threadIdx.x == 0) {
if (resultShapeInfo != nullptr)
resultLength = shape::length(resultShapeInfo);
else resultLength = 1;
if (dimensionLength == 1) {
if (dimension == nullptr || dimension[0] == MAX_DIMENSION)
resultScalar = 1;
else
resultScalar = 0;
}
else
resultScalar = 0;
if (resultLength == 1)
resultScalar = 1;
int *xStride = shape::stride(xShapeInfo);
char xOrder = shape::order(xShapeInfo);
if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) {
xElementWiseStride = xStride[dimension[0]];
} else {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
}
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if (!resultScalar) {
// dimensional reduce: one TAD (tensor-along-dimension) per output element
__shared__ shape::TAD *tad;
if (threadIdx.x == 0) {
tad = new(manager->getTADSpace()) shape::TAD(); //(xShapeInfo,dimension,dimensionLength)
tad->setExternalBuffers((void *) manager);
tad->initWithExternalTAD(tadOnlyShapeInfo, xShapeInfo, dimension, dimensionLength);
//tad->init(xShapeInfo,dimension,dimensionLength);
//tad->createTadOnlyShapeInfo();
}
__syncthreads();
if (dimensionLength > 1) {
// general path: walk each TAD by coordinates
int rank = shape::rank(tad->tadOnlyShapeInfo);
/*
long allocSize = sizeof(int) * rank;
int *xCoord = shape::cuMalloc(allocationBuffer, allocSize, manager);
*/
int xCoord[MAX_RANK];
for (int r = blockIdx.x; r < resultLength; r += gridDim.x) {
if (threadIdx.x == 0)
tad->createOffsetForBlock(r);
__syncthreads();
for(int i = threadIdx.x;i < shape::length(tad->tadOnlyShapeInfo); i += blockDim.x) {
shape::ind2subC(rank,tad->tadShape, i, xCoord);
Nd4jIndex xOffset = shape::getOffset(tad->tadOffsetForBlock, tad->tadShape, tad->tadStride, xCoord, rank);
SummaryStatsData <T> indexVal2;
indexVal2.initWithValue(dx[xOffset]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, shape::length(tad->tadOnlyShapeInfo)) ,extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[r] = getValue(sPartials[threadIdx.x]);
}
}
} else {
// single-dimension path: each TAD is strided, walk by element stride
// NOTE(review): this local xLength shadows the __shared__ xLength above.
int xLength = shape::length(xShapeInfo);
int tadLength = xLength / resultLength;
#pragma unroll
for(int i = blockIdx.x; i < resultLength; i+= gridDim.x) {
if (threadIdx.x == 0)
tad->createOffsetForBlock(i);
__syncthreads();
int indexX = tad->tadOffsetForBlock + xElementWiseStride * threadIdx.x;
if (threadIdx.x < shape::length(tad->tadOnlyShapeInfo)) {
SummaryStatsData <T> indexVal;
indexVal.initWithValue(dx[indexX]);
sPartials[threadIdx.x] = op(indexVal, extraParams);
}
#pragma unroll
for (int x = threadIdx.x + blockDim.x; x < tadLength; x+= blockDim.x) {
indexX = tad->tadOffsetForBlock + x * xElementWiseStride;
SummaryStatsData <T> indexVal2;
indexVal2.initWithValue(dx[indexX]);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], op(indexVal2, extraParams), extraParams);
}
__syncthreads();
aggregatePartials(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, shape::length(tad->tadOnlyShapeInfo)) ,extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[i] = getValue(sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams);
}
}
}
}
else if (resultScalar) {
// full reduce to a single scalar
__shared__ int n;
if (threadIdx.x == 0) {
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
n = shape::length(xShapeInfo);
}
__syncthreads();
if(xElementWiseStride >= 1) {
// contiguous/strided buffer: each thread strides over the grid
for(int i = tid;i < n; i += (blockDim.x * gridDim.x)) {
SummaryStatsData <T> indexVal2;
indexVal2.initWithValue(dx[i * xElementWiseStride]);
reduction = update(reduction,indexVal2, extraParams);
}
} else {
// no uniform stride: resolve each linear index through the shape
int rank = shape::rank(xShapeInfo);
/*
long allocSize = sizeof(int) * rank;
int *ind2sub = shape::cuMalloc(allocationBuffer, allocSize, manager); //(int *) malloc(sizeof(int) * rank);
*/
int ind2sub[MAX_RANK];
#pragma unroll
for(int i = tid;i < n; i += blockDim.x * gridDim.x) {
shape::ind2sub(rank,shape::shapeOf(xShapeInfo),i,ind2sub);
int offset = shape::getOffset(0,xShapeInfo,shape::stride(xShapeInfo),ind2sub,rank);
SummaryStatsData <T> indexVal2;
indexVal2.initWithValue(dx[offset]);
reduction = update(reduction,indexVal2, extraParams);
}
}
__syncthreads();
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials(&sPartials, threadIdx.x,blockDim.x ,extraParams);
__syncthreads();
if (gridDim.x > 1) {
// multi-block finish: last block to arrive (counted via tc[4096])
// merges the per-block partials from reductionBuffer
__shared__ bool amLast;
unsigned int *tc = (unsigned int *) reductionBuffer;
int rank = shape::rank(xShapeInfo);
tid = threadIdx.x;
if (threadIdx.x == 0) {
SummaryStatsData<T> *pBuffer = (SummaryStatsData<T> *) reductionBuffer;
pBuffer[blockIdx.x] = sPartials[0];
}
__syncthreads();
__threadfence();
if (tid==0) {
unsigned int ticket = atomicInc(&tc[4096], gridDim.x);
amLast = (ticket == gridDim.x-1);
}
__syncthreads();
if (amLast) {
tc[4096] = 0;
SummaryStatsData<T> *pBuffer = (SummaryStatsData<T> *) reductionBuffer;
T startingVal = this->startingValue(dx);
SummaryStatsData<T> val;
val.initWithValue(startingVal);
val.n = 0;
sPartials[threadIdx.x] = val;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = this->update(sPartials[threadIdx.x], pBuffer[i] ,extraParams);
}
__syncthreads();
aggregatePartials(&sPartials, threadIdx.x,gridDim.x,extraParams);
__syncthreads();
if (tid == 0) {
result[0] = getValue(sPartials[0]);
}
}
} else {
if (tid == 0) {
unsigned int *tc = (unsigned *) reductionBuffer;
tc[4096] = 0;
// NOTE(review): duplicated assignment below is redundant but harmless
result[0] = result[0] = getValue(sPartials[0]);
}
}
}
}
#endif
/**
* CPU interface
* @param x the input
* @param xShapeInfo the shape information for input
* @param extraParams the extra parameters
* @param result the result buffer
* @param resultShapeInfo the shape information
* for result
*/
virtual
#ifdef __CUDACC__
inline __host__
#elif defined(__GNUC__)
#endif
// Full-reduction CPU entry point: collapses the whole input to a single
// summary value via execScalar and stores it in result[0].
// resultShapeInfo is accepted for interface uniformity but is unused here.
void exec(T *x,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo) {
result[0] = this->execScalar(x,xShapeInfo,extraParams);
}
/**
* CPU interface
* @param x the input
* @param xShapeInfo the shape information for input
* @param extraParams the extra parameters
* @param result the result buffer
* @param resultShapeInfo the shape information
* for result
*/
virtual
#ifdef __CUDACC__
inline __host__
#elif defined(__GNUC__)
#endif
// Sequentially folds every element of the input into a running
// SummaryStatsData accumulator and returns the finalized statistic
// (variance / stddev, depending on the concrete subclass's getValue).
//
// NOTE(review): in the original, both branches of the
// `xElementWiseStride == 1` test executed the identical contiguous loop
// over x[i], so the duplicated branch (and the then-unused stride query)
// has been collapsed. Behavior is unchanged, but a non-contiguous input
// is still traversed as if dense -- confirm whether a strided walk was
// intended for xElementWiseStride != 1.
T execScalar(T *x,
int *xShapeInfo,
T *extraParams) {
SummaryStatsData<T> startingIndex;
startingIndex.initialize();
int length = shape::length(xShapeInfo);
for (int i = 0; i < length; i++) {
SummaryStatsData<T> curr;
curr.initWithValue(x[i]);
// merge the current element into the running summary statistics
startingIndex = update(startingIndex, curr, extraParams);
}
// convert the accumulated moments into the final scalar result
return this->getValue(startingIndex);
}
/**
* Dimension wise execution for CPU
* @param x the input
* @param xShapeInfo the shape information
* @param extraParams the extra parameters
* @param result the result buffer
* @param resultShapeInfoBuffer the shape information
* @param dimension the dimension to execute along long
* @param dimensionLength the length of the dimension
*/
virtual
#ifdef __CUDACC__
inline __host__
#elif defined(__GNUC__)
#endif
// Dimension-wise CPU reduction: computes one summary statistic per
// tensor-along-dimension (TAD) of the input, writing one value per TAD
// into result. Falls back to the full scalar reduction whenever the
// requested output is effectively a scalar.
void exec(T *x,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfoBuffer,
int *dimension, int dimensionLength) {
if (shape::isScalar(resultShapeInfoBuffer)) {
result[0] = execScalar(x, xShapeInfo, extraParams);
return;
}
// Build the TAD (tensor-along-dimension) decomposition: per-TAD shape
// info plus the offset of each TAD into the flat input buffer.
shape::TAD tad(xShapeInfo,dimension,dimensionLength);
tad.createTadOnlyShapeInfo();
tad.createOffsets();
//no-op
if(tad.dimensionLength < 1)
return;
int resultLength = shape::length(resultShapeInfoBuffer);
//pre squeezed: this is for keeping the pointer to the original
//shape information for tad offset
//the squeezed information doesn't render the right strides for
//tad offset
// Reducing over every dimension (or a single-element output) is the
// same as a full scalar reduction.
if (resultLength == 1 || dimensionLength == shape::rank(xShapeInfo) || tad.wholeThing) {
result[0] = execScalar(x, xShapeInfo, extraParams);
return;
}
// Slow path: the TAD has no usable positive element-wise stride, so
// each TAD must be walked coordinate-by-coordinate with the raw
// iteration macros.
if(!(shape::elementWiseStride(tad.tadOnlyShapeInfo) > 0 && (tad.numTads == 1 || shape::isVector(tad.tadOnlyShapeInfo) ||
shape::isScalar(tad.tadOnlyShapeInfo) || tad.wholeThing))) {
/**
* The element wise stride belong longs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along long arr
* we can use arr.stride(1) as a representation
* along long which to iterate.
*/
int *tadShapeShapeInfo = tad.tadOnlyShapeInfo;
int *xShape = shape::shapeOf(tadShapeShapeInfo);
int *xStride = shape::stride(tadShapeShapeInfo);
int rank = shape::rank(tadShapeShapeInfo);
#pragma omp parallel for
for (int i = 0; i < resultLength; i++) {
int offset = tad.tadOffsets[i];
int shapeIter[MAX_RANK];
int coord[MAX_RANK];
int dim;
int rankIter = rank;
int xStridesIter[MAX_RANK];
T *xPointer = x + offset;
SummaryStatsData<T> comp;
comp.initWithValue(0.0);
if(PrepareOneRawArrayIter<T>(rankIter,
xShape,
xPointer,
xStride,
&rankIter,
shapeIter,
&xPointer,
xStridesIter) >= 0) {
ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); {
/* Process the innermost dimension */
SummaryStatsData<T> comp2;
comp2.initWithValue(xPointer[0]);
comp = update(comp, comp2, extraParams);
} ND4J_RAW_ITER_ONE_NEXT(dim,
rank,
coord,
shapeIter,
xPointer,
xStridesIter);
}
else {
printf("Unable to prepare array\n");
}
result[i] = getValue(comp);
}
}
else {
// Fast path: every TAD is contiguous under a single element-wise
// stride, so each one reduces with a simple strided loop.
int tadElementWiseStride = shape::elementWiseStride(tad.tadOnlyShapeInfo);
int tadLength = shape::length(tad.tadOnlyShapeInfo);
#pragma omp parallel for
for(int i = 0; i < resultLength; i++) {
int baseOffset = tad.tadOffsets[i];
SummaryStatsData<T> comp;
comp.initWithValue(x[baseOffset]);
#pragma omp simd
for(int j = 1; j < tadLength; j++) {
SummaryStatsData<T> comp2;
comp2.initWithValue(x[baseOffset + tadElementWiseStride * j]);
comp = update(comp, comp2, extraParams);
}
result[i] = getValue(comp);
}
}
}
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
// Neutral seed for the reduction; the input pointer is intentionally unused.
T startingValue(T *input) {
return static_cast<T>(0);
}
virtual inline
#ifdef __CUDACC__
__host__ __device__
#endif
// Hook for ops that carry per-thread extra parameters; summary stats
// keep all state inside SummaryStatsData, so nothing needs merging.
void aggregateExtraParams(T **extraParamsTotal,T **extraParamsLocal) {
//no extra params aggregation needs to happen
}
#ifdef __CUDACC__
__host__ __device__
#elif defined(__GNUC__)
#endif
// Virtual destructor so deleting through a base pointer is well-defined.
virtual ~SummaryStatsReduce() {
}
#ifdef __CUDACC__
__host__ __device__
#elif defined(__GNUC__)
#endif
// Default constructor; concrete ops (Variance/StandardDeviation) set state.
SummaryStatsReduce() {
}
};
namespace ops {
/**
* var(x)
*/
template<typename T>
// Variance op: accumulation is handled entirely by the base class /
// SummaryStatsData; this subclass only finalizes the statistic in
// getValue and provides pass-through implementations of the op hooks.
class Variance: public functions::summarystats::SummaryStatsReduce<T> {
public:
/**
*
* @param val
* @param extraParams
* @return
*/
//an op for the kernel
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
// Identity: the per-element value is already folded by update().
functions::summarystats::SummaryStatsData<T> op(
functions::summarystats::SummaryStatsData<T> val, T *extraParams) override {
return val;
}
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
// Finalize: bias-corrected variance when requested, falling back to the
// population variance if the corrected estimate comes out negative.
T getValue(SummaryStatsData<T> val) {
if (this->biasCorrected) {
T ret = val.varianceBiasCorrected();
if(ret < 0)
return val.variance();
return ret;
}
return val.variance();
}
/**
*
* @param f1
* @param f2
* @param extraParams
* @return
*/
//invoked when combining two kernels
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
// Combining two partial accumulators is the same as a normal update.
functions::summarystats::SummaryStatsData<T> merge(
functions::summarystats::SummaryStatsData<T> f1,
functions::summarystats::SummaryStatsData<T> f2, T *extraParams) override {
return this->update(f1,f2,extraParams);
}
/**
*
* @param reduction
* @param n
* @param xOffset
* @param dx
* @param incx
* @param extraParams
* @param result
* @return
*/
//post process result (for things like means etc)
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
// No post-processing needed; getValue performs the finalization.
functions::summarystats::SummaryStatsData<T> postProcess(
functions::summarystats::SummaryStatsData<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) override {
return reduction;
}
/**
*
* @param d1
* @param d2
* @param extraParams
* @return
*/
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
// Pairwise op hook: unused for summary stats, returns the first operand.
SummaryStatsData<T> op(functions::summarystats::SummaryStatsData<T> d1,
functions::summarystats::SummaryStatsData<T> d2, T *extraParams) override {
return d1;
}
#ifdef __CUDACC__
__host__ __device__
#elif defined(__GNUC__)
#endif
virtual ~Variance() {
}
#ifdef __CUDACC__
__host__ __device__
#elif defined(__GNUC__)
#endif
Variance() {
}
#ifdef __CUDACC__
__host__ __device__
#elif defined(__GNUC__)
#endif
// biasCorrected selects the sample (n-1) vs population (n) estimator.
Variance(bool biasCorrected) {
this->biasCorrected = biasCorrected;
}
};
/**
* std(x)
*/
template<typename T>
// Standard-deviation op: identical to Variance except that getValue takes
// the square root of the (optionally bias-corrected) variance.
class StandardDeviation: public functions::summarystats::SummaryStatsReduce<T> {
public:
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
// Finalize: sqrt of the bias-corrected variance when requested, falling
// back to the population variance if the corrected estimate is negative.
T getValue(SummaryStatsData<T> val) {
if (this->biasCorrected) {
T ret = val.varianceBiasCorrected();
if(ret < 0)
return nd4j::math::nd4j_sqrt(val.variance());
else
return nd4j::math::nd4j_sqrt(ret);
}
return nd4j::math::nd4j_sqrt(val.variance());
}
/**
*
* @param val
* @param extraParams
* @return
*/
//an op for the kernel
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
// Identity: the per-element value is already folded by update().
functions::summarystats::SummaryStatsData<T> op(
functions::summarystats::SummaryStatsData<T> val, T *extraParams) override {
return val;
}
/**
*
* @param f1
* @param f2
* @param extraParams
* @return
*/
//invoked when combining two kernels
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
// Combining two partial accumulators is the same as a normal update.
functions::summarystats::SummaryStatsData<T> merge(
functions::summarystats::SummaryStatsData<T> f1,
functions::summarystats::SummaryStatsData<T> f2, T *extraParams) override {
return this->update(f1,f2,extraParams);
}
/**
*
* @param reduction
* @param n
* @param xOffset
* @param dx
* @param incx
* @param extraParams
* @param result
* @return
*/
//post process result (for things like means etc)
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
// No post-processing needed; getValue performs the finalization.
functions::summarystats::SummaryStatsData<T> postProcess(
functions::summarystats::SummaryStatsData<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) override {
return reduction;
}
/**
*
* @param d1
* @param d2
* @param extraParams
* @return
*/
virtual
#ifdef __CUDACC__
inline __host__ __device__
#elif defined(__GNUC__)
#endif
// Pairwise op hook: unused for summary stats, returns the first operand.
SummaryStatsData<T> op(functions::summarystats::SummaryStatsData<T> d1,
functions::summarystats::SummaryStatsData<T> d2, T *extraParams) override {
return d1;
}
#ifdef __CUDACC__
__host__ __device__
#elif defined(__GNUC__)
#endif
virtual ~StandardDeviation() {
}
#ifdef __CUDACC__
__host__ __device__
#elif defined(__GNUC__)
#endif
StandardDeviation() {
}
#ifdef __CUDACC__
__host__ __device__
#elif defined(__GNUC__)
#endif
// biasCorrected selects the sample (n-1) vs population (n) estimator.
StandardDeviation(bool biasCorrected) {
this->biasCorrected = biasCorrected;
}
};
}
template<typename T>
// Factory mapping an op code to a concrete summary-stats op:
//   0 -> Variance, 1 -> StandardDeviation, anything else -> nullptr.
// On CUDA the op is placement-new'ed into a caller-supplied buffer
// (typically shared memory) instead of the heap.
class SummaryStatsReduceOpFactory {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
SummaryStatsReduceOpFactory() {
}
#ifdef __CUDACC__
__inline__ __device__
functions::summarystats::SummaryStatsReduce<T> * getOp(int op, bool biasCorrected, unsigned char *buffer) {
#else
functions::summarystats::SummaryStatsReduce<T> * getOp(int op, bool biasCorrected) {
#endif
if (op == 0) {
#ifdef __CUDACC__
return new(buffer) ops::Variance<T>(biasCorrected);
#else
return new ops::Variance<T>(biasCorrected);
#endif
} else if (op == 1) {
#ifdef __CUDACC__
return new(buffer) ops::StandardDeviation<T>(biasCorrected);
#else
return new ops::StandardDeviation<T>(biasCorrected);
#endif
}
// unknown op code
return nullptr;
}
#ifdef __CUDACC__
__inline__ __device__
// Convenience overload: defaults to the bias-corrected estimator.
functions::summarystats::SummaryStatsReduce<T> * getOp(int op, unsigned char * buffer) {
return this->getOp(op,true, buffer);
}
#else
// Convenience overload: defaults to the bias-corrected estimator.
functions::summarystats::SummaryStatsReduce<T> * getOp(int op) {
return this->getOp(op,true);
}
#endif
};
}
}
#ifdef __CUDACC__
/**
* The driver interface for summary stats
* @param op the op number
* @param n the length
* @param dx the input
* @param xShapeInfo the shape information for x
* @param extraParams the extra parameters
* @param result the result buffer
* @param resultShapeInfo the shape information for the result
* @param gpuInformation the gpu information such as block dim, grid dim and shared memory
* @param dimension the dimension to execute along long
* @param dimensionLength the length of the dimension
* @param postProcessOrNot whether to post process or not
*/
template <typename T>
// Device-side driver: thread 0 of each block placement-constructs the op
// factory and the selected op inside dynamic shared memory, then every
// thread (after the barrier) runs the op's transform over its portion of
// the input.
__device__ void summaryStatsReduceGeneric(
int op,
T *dx,
int *xShapeInfo, int xRank,
T *extraParams,
T *result,
int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength, int postProcessOrNot,bool biasCorrected, int *allocationBuffer, T *reductionBuffer, int *tadOnlyShapeInfo) {
__shared__ functions::summarystats::SummaryStatsReduce<T> *indexReduce;
__shared__ functions::summarystats::SummaryStatsReduceOpFactory<T> *newOpFactory;
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
// Carve the dynamic shared-memory region into manager / factory / op
// sub-allocations; all objects live in shared memory, not the heap.
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), sizeof(functions::summarystats::SummaryStatsReduceOpFactory<T>), sizeof(functions::summarystats::SummaryStatsReduce<T>), sizeof(shape::TAD), xRank);
newOpFactory = new(manager->getFactorySpace()) functions::summarystats::SummaryStatsReduceOpFactory<T>();
indexReduce = newOpFactory->getOp(op,biasCorrected, manager->getFunctionSpace());
}
// make the shared pointers visible to all threads before use
__syncthreads();
indexReduce->transform(
dx,
xShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
postProcessOrNot,
allocationBuffer,
reductionBuffer,
manager, tadOnlyShapeInfo);
}
/**
* The driver interface for summary stats
* @param op the op number
* @param n the length
* @param dx the input
* @param xShapeInfo the shape information for x
* @param extraParams the extra parameters
* @param result the result buffer
* @param resultShapeInfo the shape information for the result
* @param gpuInformation the gpu information such as block dim, grid dim and shared memory
* @param dimension the dimension to execute along long
* @param dimensionLength the length of the dimension
* @param postProcessOrNot whether to post process or not
*/
// Kernel entry point: double-precision instantiation of the generic
// summary-stats reduction driver (thin forwarding wrapper).
__global__ void summaryStatsReduceDouble(
int op,
double *dx,
int *xShapeInfo, int xRank,
double *extraParams,
double *result,
int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot,
bool biasCorrected, int *allocationBuffer, double *reductionBuffer, int *tadOnlyShapeInfo) {
summaryStatsReduceGeneric<double>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot,biasCorrected, allocationBuffer, reductionBuffer, tadOnlyShapeInfo);
}
/**
* The driver interface for summary stats
* @param op the op number
* @param n the length
* @param dx the input
* @param xShapeInfo the shape information for x
* @param extraParams the extra parameters
* @param result the result buffer
* @param resultShapeInfo the shape information for the result
* @param gpuInformation the gpu information such as block dim, grid dim and shared memory
* @param dimension the dimension to execute along long
* @param dimensionLength the length of the dimension
* @param postProcessOrNot whether to post process or not
*/
// Kernel entry point: single-precision instantiation of the generic
// summary-stats reduction driver (thin forwarding wrapper).
__global__ void summaryStatsReduceFloat(
int op,
float *dx,
int *xShapeInfo, int xRank,
float *extraParams,
float *result,
int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot,bool biasCorrected,int *allocationBuffer, float *reductionBuffer, int *tadOnlyShapeInfo) {
summaryStatsReduceGeneric<float>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot,biasCorrected, allocationBuffer, reductionBuffer, tadOnlyShapeInfo);
}
#endif
#endif /* SUMMARYSTATSREDUCE_H_ */
|
omp_parallel_for_reduction.c | <ompts:test>
<ompts:testdescription>Test which checks the omp parallel for reduction directive with all its options.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for reduction</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
/* OpenMP validation-suite template: exercises every reduction operator of
 * the `omp parallel for reduction` directive (+, -, *, &&, ||, &, |, ^)
 * under schedule(dynamic,1). The <ompts:*> tags are expanded by the suite
 * generator into the checked / cross-checked variants of the test.
 * Returns 1 (true) when every reduction produced the expected value. */
int <ompts:testcode:functionname>omp_parallel_for_reduction</ompts:testcode:functionname>(FILE * logFile){
int sum=0;
int known_sum;
double dsum=0;
double dknown_sum;
double dt=0.5; /* base of geometric row for + and - test*/
double rounding_error= 1.E-9;
#define DOUBLE_DIGITS 20 /* dt^DOUBLE_DIGITS */
int diff;
double ddiff;
int product=1;
int known_product;
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800 /* 10! */
int logic_and=1;
int logic_or=0;
int bit_and=1;
int bit_or=0;
int exclusiv_bit_or=0;
int logics[LOOPCOUNT];
int i;
double dpt;
int result=0;
dt = 1./3.;
/* --- integer + reduction: sum of 1..LOOPCOUNT --- */
known_sum = (LOOPCOUNT*(LOOPCOUNT+1))/2;
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(+:sum)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for (i=1;i<=LOOPCOUNT;i++)
{
sum=sum+i;
}
if(known_sum!=sum)
{
result++;
fprintf(logFile,"Error in sum with integers: Result was %d instead of %d\n",sum,known_sum);
}
/* --- integer - reduction: subtracting 1..LOOPCOUNT from their sum must yield 0 --- */
diff = (LOOPCOUNT*(LOOPCOUNT+1))/2;
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(-:diff)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for (i=1;i<=LOOPCOUNT;++i)
{
diff=diff-i;
}
if(diff != 0)
{
result++;
fprintf(logFile,"Error in difference with integers: Result was %d instead of 0.\n",diff);
}
/* Tests for doubles */
/* --- double + reduction: geometric series sum(dt^i), closed form (1-dt^n)/(1-dt) --- */
dsum=0;
dpt=1;
for (i=0;i<DOUBLE_DIGITS;++i)
{
dpt*=dt;
}
dknown_sum = (1-dpt)/(1-dt);
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(+:dsum)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for (i=0;i<DOUBLE_DIGITS;++i)
{
dsum += pow(dt,i);
}
if( fabs(dsum-dknown_sum) > rounding_error )
{
result++;
fprintf(logFile,"Error in sum with doubles: Result was %f instead of %f (Difference: %E)\n",dsum,dknown_sum, dsum-dknown_sum);
}
/* --- double - reduction: subtracting the series terms from the closed form must be ~0 --- */
dpt=1;
for (i=0;i<DOUBLE_DIGITS;++i)
{
dpt*=dt;
}
fprintf(logFile,"\n");
ddiff = (1-dpt)/(1-dt);
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(-:ddiff)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for (i=0;i<DOUBLE_DIGITS;++i)
{
ddiff -= pow(dt,i);
}
if( fabs(ddiff) > rounding_error)
{
result++;
fprintf(logFile,"Error in Difference with doubles: Result was %E instead of 0.0\n",ddiff);
}
/* --- integer * reduction: factorial of MAX_FACTOR --- */
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(*:product)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for(i=1;i<=MAX_FACTOR;i++)
{
product *= i;
}
known_product = KNOWN_PRODUCT;
if(known_product != product)
{
result++;
fprintf(logFile,"Error in Product with integers: Result was %d instead of %d\n\n",product,known_product);
}
/* --- && reduction: all-ones array must stay true; one zero must flip it --- */
for(i=0;i<LOOPCOUNT;i++)
{
logics[i]=1;
}
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(&&:logic_and)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for(i=0;i<LOOPCOUNT;++i)
{
logic_and = (logic_and && logics[i]);
}
if(!logic_and)
{
result++;
fprintf(logFile,"Error in logic AND part 1.\n");
}
logic_and = 1;
logics[LOOPCOUNT/2]=0;
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(&&:logic_and)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for(i=0;i<LOOPCOUNT;++i)
{
logic_and = logic_and && logics[i];
}
if(logic_and)
{
result++;
fprintf(logFile,"Error in logic AND part 2.\n");
}
/* --- || reduction: all-zeros array must stay false; one 1 must flip it --- */
for(i=0;i<LOOPCOUNT;i++)
{
logics[i]=0;
}
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(||:logic_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for(i=0;i<LOOPCOUNT;++i)
{
logic_or = logic_or || logics[i];
}
if(logic_or)
{
result++;
fprintf(logFile,"Error in logic OR part 1.\n");
}
logic_or = 0;
logics[LOOPCOUNT/2]=1;
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(||:logic_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for(i=0;i<LOOPCOUNT;++i)
{
logic_or = logic_or || logics[i];
}
if(!logic_or)
{
result++;
fprintf(logFile,"Error in logic OR part 2.\n");
}
/* --- & reduction: same pattern as &&, with bitwise AND --- */
for(i=0;i<LOOPCOUNT;++i)
{
logics[i]=1;
}
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(&:bit_and)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for(i=0;i<LOOPCOUNT;++i)
{
bit_and = (bit_and & logics[i]);
}
if(!bit_and)
{
result++;
fprintf(logFile,"Error in BIT AND part 1.\n");
}
bit_and = 1;
logics[LOOPCOUNT/2]=0;
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(&:bit_and)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for(i=0;i<LOOPCOUNT;++i)
{
bit_and = bit_and & logics[i];
}
if(bit_and)
{
result++;
fprintf(logFile,"Error in BIT AND part 2.\n");
}
/* --- | reduction: same pattern as ||, with bitwise OR --- */
for(i=0;i<LOOPCOUNT;i++)
{
logics[i]=0;
}
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(|:bit_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for(i=0;i<LOOPCOUNT;++i)
{
bit_or = bit_or | logics[i];
}
if(bit_or)
{
result++;
fprintf(logFile,"Error in BIT OR part 1\n");
}
bit_or = 0;
logics[LOOPCOUNT/2]=1;
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(|:bit_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for(i=0;i<LOOPCOUNT;++i)
{
bit_or = bit_or | logics[i];
}
if(!bit_or)
{
result++;
fprintf(logFile,"Error in BIT OR part 2\n");
}
/* --- ^ reduction: zeros give 0; a single set bit gives 1 --- */
for(i=0;i<LOOPCOUNT;i++)
{
logics[i]=0;
}
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(^:exclusiv_bit_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for(i=0;i<LOOPCOUNT;++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
if(exclusiv_bit_or)
{
result++;
fprintf(logFile,"Error in EXCLUSIV BIT OR part 1\n");
}
exclusiv_bit_or = 0;
logics[LOOPCOUNT/2]=1;
#pragma omp parallel for schedule(dynamic,1) <ompts:check>reduction(^:exclusiv_bit_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
for(i=0;i<LOOPCOUNT;++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
if(!exclusiv_bit_or)
{
result++;
fprintf(logFile,"Error in EXCLUSIV BIT OR part 2\n");
}
/*printf("\nResult:%d\n",result);*/
return (result==0);
}
</ompts:testcode>
</ompts:test>
|
GB_unop__identity_uint64_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint64_fp32
// op(A') function: GB_unop_tran__identity_uint64_fp32
// C type: uint64_t
// A type: float
// cast: uint64_t cij = GB_cast_to_uint64_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator element-wise with typecast float -> uint64_t:
// Cx [p] = cast (Ax [p]) for p in [0, anz). Parallelized over nthreads;
// each iteration is independent, and Cx may alias Ax since reads and
// writes hit the same index. (Auto-generated file: keep edits in the
// Generator/ template, not here.)
GrB_Info GB_unop_apply__identity_uint64_fp32
(
uint64_t *Cx, // Cx and Ax may be aliased
const float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose with typecast float -> uint64_t.
// The actual loop nest lives in the shared GB_unop_transpose.c template,
// driven by the GB_* macros defined above (phase 2 of 2: numeric work).
// (Auto-generated file: keep edits in the Generator/ template, not here.)
GrB_Info GB_unop_tran__identity_uint64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
residual_based_implicit_time_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_BASED_IMPLICIT_TIME_SCHEME )
#define KRATOS_RESIDUAL_BASED_IMPLICIT_TIME_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/schemes/scheme.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedImplicitTimeScheme
* @ingroup KratosCore
* @brief This is the base class for the implicit time schemes
* @details Other implicit schemes should derive from this one. With the use of this base scheme it is possible to reduce code duplication
* @tparam TSparseSpace The sparse space considered
* @tparam TDenseSpace The dense space considered
* @see Scheme
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace >
class ResidualBasedImplicitTimeScheme
: public Scheme<TSparseSpace,TDenseSpace>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ResidualBasedImplicitTimeScheme
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedImplicitTimeScheme );
/// Base class definition
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
/// DoF array type definition
typedef typename BaseType::DofsArrayType DofsArrayType;
/// DoF vector type definition
typedef typename Element::DofsVectorType DofsVectorType;
/// Data type definition
typedef typename BaseType::TDataType TDataType;
/// Matrix type definition
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
/// Vector type definition
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// Local system matrix type definition
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
/// Local system vector type definition
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
/// Nodes containers definition
typedef ModelPart::NodesContainerType NodesArrayType;
/// Elements containers definition
typedef ModelPart::ElementsContainerType ElementsArrayType;
/// Conditions containers definition
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
/// Index type definition
typedef std::size_t IndexType;
///@}
///@name Life Cycle
///@{
/**
* Constructor.
* The implicit method method
*/
explicit ResidualBasedImplicitTimeScheme()
:BaseType()
{
// Allocate auxiliary memory
const std::size_t num_threads = OpenMPUtils::GetNumThreads();
mMatrix.M.resize(num_threads);
mMatrix.D.resize(num_threads);
}
/**
* @brief Constructor. The implicit method method
* @param ThisParameters The configuration parameters
*/
explicit ResidualBasedImplicitTimeScheme(Parameters ThisParameters)
:ResidualBasedImplicitTimeScheme()
{
// Delegates allocation to the default ctor, then validates the user
// parameters against the defaults and applies them.
this->ValidateAndAssignParameters(ThisParameters);
this->AssignSettings(ThisParameters);
}
/** Copy Constructor.
*/
// Copies the base scheme state and the per-thread M/D matrix storage.
explicit ResidualBasedImplicitTimeScheme(ResidualBasedImplicitTimeScheme& rOther)
:BaseType(rOther)
,mMatrix(rOther.mMatrix)
{
}
/**
* Clone
*/
typename BaseType::Pointer Clone() override
{
// Polymorphic copy via the copy constructor.
return Kratos::make_shared<ResidualBasedImplicitTimeScheme>(*this);
}
/** Destructor.
*/
~ResidualBasedImplicitTimeScheme() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief It initializes a non-linear iteration (for the element)
* @param rModelPart The model part of the problem to solve
* @param A LHS matrix
* @param Dx Incremental update of primary variables
* @param b RHS Vector
*/
void InitializeNonLinIteration(
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
) override
{
KRATOS_TRY;
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Each entity loop is embarrassingly parallel: the iteration hook is
// invoked independently on every element, condition and constraint.
// Definition of the first element iterator
const auto it_elem_begin = rModelPart.ElementsBegin();
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) {
auto it_elem = it_elem_begin + i;
it_elem->InitializeNonLinearIteration(r_current_process_info);
}
// Definition of the first condition iterator
const auto it_cond_begin = rModelPart.ConditionsBegin();
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) {
auto it_cond = it_cond_begin + i;
it_cond->InitializeNonLinearIteration(r_current_process_info);
}
// Definition of the first constraint iterator
const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin();
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) {
auto it_const = it_const_begin + i;
it_const->InitializeNonLinearIteration(r_current_process_info);
}
KRATOS_CATCH( "" );
}
/**
* @brief It initializes a non-linear iteration (for an individual condition)
* @param pCurrentCondition The condition to compute
* @param rCurrentProcessInfo The current process info instance
*/
void InitializeNonLinearIteration(
Condition::Pointer pCurrentCondition,
ProcessInfo& rCurrentProcessInfo
) override
{
// Bind through a const reference so the const overload of the
// condition's InitializeNonLinearIteration is selected.
const ProcessInfo& r_process_info = rCurrentProcessInfo;
pCurrentCondition->InitializeNonLinearIteration(r_process_info);
}
/**
* @brief It initializes a non-linear iteration (for an individual element)
* @param pCurrentElement The element to compute
* @param rCurrentProcessInfo The current process info instance
*/
void InitializeNonLinearIteration(
Element::Pointer pCurrentElement,
ProcessInfo& rCurrentProcessInfo
) override
{
// Bind through a const reference so the const overload of the
// element's InitializeNonLinearIteration is selected.
const ProcessInfo& r_process_info = rCurrentProcessInfo;
pCurrentElement->InitializeNonLinearIteration(r_process_info);
}
/**
* @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme.
* @details It "asks" the matrix needed to the element and performs the operations needed to introduce the selected time integration scheme. This function calculates at the same time the contribution to the LHS and to the RHS of the system
* @param rCurrentElement The element to compute
* @param LHS_Contribution The LHS matrix contribution
* @param RHS_Contribution The RHS vector contribution
* @param EquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateSystemContributions(
Element& rCurrentElement,
LocalSystemMatrixType& LHS_Contribution,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
KRATOS_TRY;
// Per-thread scratch index: M/D matrices are stored per OpenMP thread
// so concurrent assembly does not race.
const IndexType this_thread = OpenMPUtils::ThisThread();
//rCurrentElement.InitializeNonLinearIteration(rCurrentProcessInfo);
// Static contributions and DoF ids from the element itself
rCurrentElement.CalculateLocalSystem(LHS_Contribution,RHS_Contribution,rCurrentProcessInfo);
rCurrentElement.EquationIdVector(EquationId,rCurrentProcessInfo);
// Mass and damping matrices feed the dynamic terms below
rCurrentElement.CalculateMassMatrix(mMatrix.M[this_thread],rCurrentProcessInfo);
rCurrentElement.CalculateDampingMatrix(mMatrix.D[this_thread],rCurrentProcessInfo);
// Fold the time-integration (dynamic) terms into LHS and RHS; the
// concrete scheme defines AddDynamicsToLHS/AddDynamicsToRHS.
AddDynamicsToLHS(LHS_Contribution, mMatrix.D[this_thread], mMatrix.M[this_thread], rCurrentProcessInfo);
AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mMatrix.D[this_thread], mMatrix.M[this_thread], rCurrentProcessInfo);
KRATOS_CATCH("ResidualBasedImplicitTimeScheme.CalculateSystemContributions");
}
/**
* @brief This function is designed to calculate just the RHS contribution
* @param rCurrentElement The element to compute
* @param rRHSContribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateRHSContribution(
Element& rCurrentElement,
LocalSystemVectorType& rRHSContribution,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
KRATOS_TRY;
// Per-thread scratch index for the shared M/D matrix storage
const IndexType this_thread = OpenMPUtils::ThisThread();
// Initializing the non linear iteration for the current element
// rCurrentElement.InitializeNonLinearIteration(rCurrentProcessInfo);
// Basic operations for the element considered
rCurrentElement.CalculateRightHandSide(rRHSContribution,rCurrentProcessInfo);
rCurrentElement.CalculateMassMatrix(mMatrix.M[this_thread], rCurrentProcessInfo);
rCurrentElement.CalculateDampingMatrix(mMatrix.D[this_thread],rCurrentProcessInfo);
rCurrentElement.EquationIdVector(rEquationId,rCurrentProcessInfo);
// Add only the dynamic RHS terms (no LHS assembly on this path)
AddDynamicsToRHS (rCurrentElement, rRHSContribution, mMatrix.D[this_thread], mMatrix.M[this_thread], rCurrentProcessInfo);
KRATOS_CATCH("ResidualBasedImplicitTimeScheme.Calculate_RHS_Contribution");
}
/**
* @brief Functions totally analogous to the precedent but applied to the "condition" objects
* @param rCurrentCondition The condition to compute
* @param rLHSContribution The LHS matrix contribution
* @param rRHSContribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateSystemContributions(
    Condition& rCurrentCondition,
    LocalSystemMatrixType& rLHSContribution,
    LocalSystemVectorType& rRHSContribution,
    Element::EquationIdVectorType& rEquationId,
    const ProcessInfo& rCurrentProcessInfo
    ) override
{
    KRATOS_TRY;

    // Per-thread scratch matrices for the dynamic terms.
    const IndexType thread_id = OpenMPUtils::ThisThread();
    auto& r_mass_matrix = mMatrix.M[thread_id];
    auto& r_damping_matrix = mMatrix.D[thread_id];

    // Static contribution and equation ids of the condition's DOFs.
    rCurrentCondition.CalculateLocalSystem(rLHSContribution, rRHSContribution, rCurrentProcessInfo);
    rCurrentCondition.EquationIdVector(rEquationId, rCurrentProcessInfo);

    // Local dynamic matrices of the condition.
    rCurrentCondition.CalculateMassMatrix(r_mass_matrix, rCurrentProcessInfo);
    rCurrentCondition.CalculateDampingMatrix(r_damping_matrix, rCurrentProcessInfo);

    // The derived scheme blends the dynamic terms into the local system.
    AddDynamicsToLHS(rLHSContribution, r_damping_matrix, r_mass_matrix, rCurrentProcessInfo);
    AddDynamicsToRHS(rCurrentCondition, rRHSContribution, r_damping_matrix, r_mass_matrix, rCurrentProcessInfo);

    KRATOS_CATCH("ResidualBasedImplicitTimeScheme.CalculateSystemContributions");
}
/**
* @brief Functions that calculates the RHS of a "condition" object
* @param rCurrentCondition The condition to compute
* @param rRHSContribution The RHS vector contribution
* @param rEquationId The ID's of the condition degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateRHSContribution(
    Condition& rCurrentCondition,
    LocalSystemVectorType& rRHSContribution,
    Element::EquationIdVectorType& rEquationId,
    const ProcessInfo& rCurrentProcessInfo
    ) override
{
    KRATOS_TRY;

    // Per-thread scratch matrices for the dynamic terms.
    const IndexType thread_id = OpenMPUtils::ThisThread();
    auto& r_mass_matrix = mMatrix.M[thread_id];
    auto& r_damping_matrix = mMatrix.D[thread_id];

    // Static residual, DOF equation ids, and local dynamic matrices.
    rCurrentCondition.CalculateRightHandSide(rRHSContribution, rCurrentProcessInfo);
    rCurrentCondition.EquationIdVector(rEquationId, rCurrentProcessInfo);
    rCurrentCondition.CalculateMassMatrix(r_mass_matrix, rCurrentProcessInfo);
    rCurrentCondition.CalculateDampingMatrix(r_damping_matrix, rCurrentProcessInfo);

    // Adding the dynamic contributions (static is already included).
    AddDynamicsToRHS(rCurrentCondition, rRHSContribution, r_damping_matrix, r_mass_matrix, rCurrentProcessInfo);

    KRATOS_CATCH("ResidualBasedImplicitTimeScheme.Calculate_RHS_Contribution");
}
/**
* @brief It initializes time step solution. Only for reasons if the time step solution is restarted
* @param rModelPart The model part of the problem to solve
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void InitializeSolutionStep(
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY;

    // Fix: bind by const reference instead of copying the whole ProcessInfo
    // container; only DELTA_TIME is read from it below.
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

    BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);

    // An (effectively) zero time step would make the integration
    // coefficients of any implicit scheme blow up, so fail loudly.
    const double delta_time = r_current_process_info[DELTA_TIME];
    KRATOS_ERROR_IF(delta_time < 1.0e-24) << "ERROR:: Detected delta_time = 0 in the Solution Scheme DELTA_TIME. PLEASE : check if the time step is created correctly for the current time step" << std::endl;

    KRATOS_CATCH("ResidualBasedImplicitTimeScheme.InitializeSolutionStep");
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided.
* @details Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return Zero means all ok
*/
int Check(const ModelPart& rModelPart) const override
{
    KRATOS_TRY;
    // Delegate verification entirely to the base scheme and forward its
    // status unchanged (zero means everything is consistent).
    return BaseType::Check(rModelPart);
    KRATOS_CATCH("ResidualBasedImplicitTimeScheme.Check");
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
    // Defaults specific to this scheme; the raw JSON string is unchanged.
    Parameters this_scheme_defaults = Parameters(R"(
{
"name" : "residualbased_implicit_time_scheme"
})");
    // Merge in every default declared by the base scheme.
    this_scheme_defaults.RecursivelyAddMissingParameters(BaseType::GetDefaultParameters());
    return this_scheme_defaults;
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
    // Identifier string also emitted by PrintInfo()/PrintData().
    static const char* scheme_name = "ResidualBasedImplicitTimeScheme";
    return scheme_name;
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
// Per-thread scratch storage for the local dynamic matrices; the Calculate*
// methods index these with OpenMPUtils::ThisThread() so concurrent assembly
// does not share buffers.
// NOTE(review): the previous comments had M and D swapped — per the
// AddDynamicsToRHS contract "b - M*a - D*v", M multiplies the SECOND time
// derivative and D the FIRST.
struct GeneralMatrices
{
std::vector< Matrix > M; /// Mass matrices (one per thread): multiply the second time derivative
std::vector< Matrix > D; /// Damping matrices (one per thread): multiply the first time derivative
};
GeneralMatrices mMatrix;
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief It adds the dynamic LHS contribution of the elements LHS = d(-RHS)/d(un0) = c0*c0*M + c0*D + K
* @param LHS_Contribution The dynamic contribution for the LHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
virtual void AddDynamicsToLHS(
LocalSystemMatrixType& LHS_Contribution,
LocalSystemMatrixType& D,
LocalSystemMatrixType& M,
const ProcessInfo& rCurrentProcessInfo
)
{
// Intentionally unimplemented: each concrete time scheme must override this
// to assemble its mass/damping coefficients into the LHS. Reaching the base
// version is always a usage error, hence the hard failure.
KRATOS_ERROR << "YOU ARE CALLING THE BASE CLASS OF AddDynamicsToLHS" << std::endl;
}
/**
* @brief It adds the dynamic RHS contribution of the elements b - M*a - D*v
* @param rCurrentElement The element to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
virtual void AddDynamicsToRHS(
Element& rCurrentElement,
LocalSystemVectorType& RHS_Contribution,
LocalSystemMatrixType& D,
LocalSystemMatrixType& M,
const ProcessInfo& rCurrentProcessInfo
)
{
// Intentionally unimplemented: derived schemes override this to subtract the
// element's inertia and damping terms (b - M*a - D*v) from the residual.
KRATOS_ERROR << "YOU ARE CALLING THE BASE CLASS OF AddDynamicsToRHS" << std::endl;
}
/**
* @brief It adds the dynamic RHS contribution of the condition RHS = fext - M*an0 - D*vn0 - K*dn0
* @param rCurrentCondition The condition to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
virtual void AddDynamicsToRHS(
Condition& rCurrentCondition,
LocalSystemVectorType& RHS_Contribution,
LocalSystemMatrixType& D,
LocalSystemMatrixType& M,
const ProcessInfo& rCurrentProcessInfo
)
{
// Intentionally unimplemented: the condition overload of the hook above;
// derived schemes must provide it, so the base version fails hard.
KRATOS_ERROR << "YOU ARE CALLING THE BASE CLASS OF AddDynamicsToRHS" << std::endl;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedImplicitTimeScheme */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_IMPLICIT_TIME_SCHEME defined */
|
MPI + OpenMP Matrix Multiplication.c | #include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
typedef double ttype;
ttype tdiff(struct timespec a, struct timespec b)
/* Elapsed seconds from a to b: whole seconds plus the nanosecond fraction. */
{
    return (b.tv_sec - a.tv_sec) + (b.tv_nsec - a.tv_nsec) / 1E9;
}
struct timespec now()
/* Snapshot the wall clock (CLOCK_REALTIME). */
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    return ts;
}
/*MPI_Type Enum*/
enum {
MASTER=0,
FROM_MASTER=1,
FROM_WORKER=2,
NRA = 10 ,
NCA = 10 ,
NRB = 10 ,
NCB = 10 ,
};
/*
 * Master/worker matrix multiplication: the MASTER rank builds A and B,
 * partitions A's rows across the workers, each worker multiplies its slice
 * (threaded with OpenMP), and the master reassembles and times the result.
 */
int main(int argc, char *argv[]) {
    //MPI bookkeeping
    int num_tasks;      /* total number of MPI tasks */
    int task_id;        /* rank of this task */
    int num_workers;    /* all ranks except MASTER */
    int source;         /* rank of sender */
    int dest;           /* rank of receiver */
    int mtype;          /* message tag (FROM_MASTER / FROM_WORKER) */
    MPI_Status status;  /* return status for receives */
    int rows;           /* number of A-rows assigned to one worker */
    int offset;         /* first A-row assigned to one worker */
    struct timespec begin, end;
    double time_spent;
    //Matrix multiplication indices and dimensions
    int i = 0, j = 0, k = 0;
    int matrix_row = 0, matrix_column = 0;
    int vector_row = 0, vector_column = 0;

    /* Guard argv[1] before touching it. */
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_threads>\n", argv[0]);
        return 1;
    }
    /* Bug fix: the thread count is an integer, so parse with atoi(); the old
     * atof() call was also an implicit declaration (<stdlib.h> was missing),
     * which is undefined behavior for a function returning double. */
    int num_thread = atoi(argv[1]);

    /* Start up MPI */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &task_id);
    MPI_Comm_size(MPI_COMM_WORLD, &num_tasks);
    num_workers = num_tasks - 1;

    /* Bug fix: with a single task there are no workers and the row
     * partition below would divide by zero. */
    if (num_workers < 1) {
        if (task_id == MASTER)
            fprintf(stderr, "This program needs at least 2 MPI tasks (1 master + workers).\n");
        MPI_Finalize();
        return 1;
    }

    if (task_id == MASTER) {
        /* Build matrix A. (NRB is used for A's column count to mirror
         * vector_row below; all extents are 10 here.) */
        matrix_row = NRA;
        matrix_column = NRB;
        int matrix[matrix_row][matrix_column];
        for (i = 0; i < matrix_row; i++)
            for (j = 0; j < matrix_column; j++)
                matrix[i][j] = i + j;
        /* Only print small matrices. */
        if ((matrix_row + matrix_column) < 22) {
            printf("Matrix A:\n");
            for (i = 0; i < matrix_row; i++) {
                for (j = 0; j < matrix_column; j++)
                    printf("%d\t", matrix[i][j]);
                printf("\n");
            }
        }

        /* Build matrix B (named "vector" throughout). */
        vector_row = NRB;
        vector_column = NCB;
        int vector[vector_row][vector_column];
        //Do some basic error checking
        if (matrix_column != vector_row) {
            printf("Matrices with entered orders can't be multiplied with each other.\n");
            return 1;
        }
        for (i = 0; i < vector_row; i++)
            for (j = 0; j < vector_column; j++)
                vector[i][j] = i - j;
        if ((vector_row + vector_column) < 22) {
            printf("Vector B:\n");
            for (i = 0; i < vector_row; i++) {
                for (j = 0; j < vector_column; j++)
                    printf("%d\t", vector[i][j]);
                printf("\n");
            }
        }

        begin = now();

        /* Partition A's rows across the workers; the first `left_overs`
         * workers take one extra row each. */
        int average_row = matrix_row / num_workers;
        int left_overs = matrix_row % num_workers;
        offset = 0;   /* no shadowing: reuse the function-scope variables */
        rows = 0;
        mtype = FROM_MASTER;
        for (dest = 1; dest < num_tasks; dest++) {
            rows = (dest <= left_overs) ? average_row + 1 : average_row;
            printf("Sending %d rows to task %d\n", rows, dest);
            MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&matrix_row, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&matrix_column, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&matrix[offset][0], rows * matrix_column, MPI_INT, dest, mtype,
                     MPI_COMM_WORLD);
            MPI_Send(&vector_row, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&vector_column, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&vector, vector_row * vector_column, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            offset += rows;
        }
        printf("Sent all the data!\n");

        //Define size of result matrix
        int product[matrix_row][vector_column];
        //Wait for results from workers
        mtype = FROM_WORKER;
        for (i = 1; i < num_tasks; i++) {
            source = i;
            printf("Receiving data from task %d\n", source);
            MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
            MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
            int result[rows][vector_column];
            MPI_Recv(&result, rows * vector_column, MPI_INT, source, mtype,
                     MPI_COMM_WORLD, &status);
            //Put results into their proper place in the product matrix
            for (j = 0; j < rows; j++)
                for (k = 0; k < vector_column; k++)
                    product[j + offset][k] = result[j][k];
        }

        //Print the results
        if ((matrix_row + vector_column) < 22) {
            printf("Product of entered matrices:-\n");
            for (i = 0; i < matrix_row; i++) {
                for (j = 0; j < vector_column; j++) {
                    printf("%d\t", product[i][j]);
                }
                printf("\n");
            }
        }
    } else {
        //Worker task
        mtype = FROM_MASTER;
        //Get this worker's slice of matrix A
        MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&matrix_row, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&matrix_column, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
        int matrix[rows][matrix_column];
        MPI_Recv(&matrix, rows * matrix_column, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
        //Get matrix B (ie vector[][])
        MPI_Recv(&vector_row, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&vector_column, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
        int vector[vector_row][vector_column];
        MPI_Recv(&vector, vector_row * vector_column, MPI_INT, MASTER, mtype,
                 MPI_COMM_WORLD, &status);

        //Compute this worker's block of the product
        int result[rows][vector_column];
        omp_set_num_threads(num_thread);
        /* Bug fix: j and k are function-scope variables, so without a
         * private() clause every OpenMP thread shared them — a data race
         * that could corrupt result[][]. The loop variable i is private
         * automatically. */
        #pragma omp parallel for private(j, k)
        for (i = 0; i < vector_column; i++) {
            for (j = 0; j < rows; j++) {
                result[j][i] = 0;
                for (k = 0; k < vector_row; k++) {
                    result[j][i] = result[j][i] + matrix[j][k] * vector[k][i];
                }
            }
        }

        //Send the results back to the master process
        mtype = FROM_WORKER;
        MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
        MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
        MPI_Send(&result, rows * vector_column, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
    }

    if (task_id == MASTER) {
        end = now();
        time_spent = tdiff(begin, end);
        printf("( %d )time spent %.8f sec\n", num_tasks, time_spent);
    }
    MPI_Finalize();
    return 0;
}
|
sapH_fmt_plug.c | /*
* this is a SAP-H plugin for john the ripper.
* Copyright (c) 2014 JimF, and it is hereby released
* to the general public under the following terms: Redistribution and use in
* source and binary forms, with or without modification, are permitted.
*
* The internals of this algorithm were found on the hashcat forum, and
* implemented here, whether, it is right or wrong. A link to that post is:
* http://hashcat.net/forum/thread-3804.html
* There are some things which are unclear, BUT which have been coded as listed
* within that post. Things such as the signatures themselves are somewhat
* unclear, and do not follow patterns well. The sha1 signature is lower case
* and does not contain the 1. The other signatures are upper case. This code
* was implemented in the exact manner as described on the forum, and will be
* used as such, until we find out that it is right or wrong (i.e. we get sample
* hashs from a REAL system in the other formats). If things are not correct,
* getting this format corrected will be trivial.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sapH;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sapH);
#else
#include <string.h>
#include <ctype.h>
#include "arch.h"
/* for now, undef this until I get OMP working, then start on SIMD */
//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA1
//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA256
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
#if !ARCH_LITTLE_ENDIAN
// For now, neuter this format from SIMD building.
// Someone else can port to BE at a later date.
#undef SIMD_COEF_32
#undef SIMD_PARA_SHA1
#undef SIMD_PARA_SHA256
#undef SIMD_COEF_64
#undef SIMD_PARA_SHA512
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "base64_convert.h"
#include "sha.h"
#include "sha2.h"
#include "johnswap.h"
#if defined(_OPENMP)
#include <omp.h>
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#endif
/*
* Assumption is made that SIMD_COEF_32*SIMD_PARA_SHA1 is >= than
* SHA256_COEF*PARA and SHA512_COEF*PARA, and that these other 2
* will evenly divide the SIMD_COEF_32*SHA1_SSRE_PARA value.
* Works with current code. BUT if SIMD_PARA_SHA1 was 3 and
* SIMD_PARA_SHA256 was 2, then we would have problems.
*/
#ifdef SIMD_COEF_32
#define NBKEYS1 (SIMD_COEF_32 * SIMD_PARA_SHA1)
#else
#define NBKEYS1 1
#endif
#ifdef SIMD_COEF_32
#define NBKEYS256 (SIMD_COEF_32 * SIMD_PARA_SHA256)
#else
#define NBKEYS256 1
#endif
#ifdef SIMD_COEF_64
#define NBKEYS512 (SIMD_COEF_64 * SIMD_PARA_SHA512)
#else
#define NBKEYS512 1
#endif
// the least common multiple of the NBKEYS* above
#define NBKEYS (SIMD_COEF_32*SIMD_PARA_SHA1*SIMD_PARA_SHA256*SIMD_PARA_SHA512)
#include "simd-intrinsics.h"
#define FORMAT_LABEL "saph"
#define FORMAT_NAME "SAP CODVN H (PWDSALTEDHASH)"
#define FORMAT_TAG "{x-issha, "
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_TAG256 "{x-isSHA256, "
#define FORMAT_TAG256_LEN (sizeof(FORMAT_TAG256)-1)
#define FORMAT_TAG384 "{x-isSHA384, "
#define FORMAT_TAG384_LEN (sizeof(FORMAT_TAG384)-1)
#define FORMAT_TAG512 "{x-isSHA512, "
#define FORMAT_TAG512_LEN (sizeof(FORMAT_TAG512)-1)
#define ALGORITHM_NAME "SHA-1/SHA-2 " SHA1_ALGORITHM_NAME
#include "memdbg.h"
#define BENCHMARK_COMMENT " (SHA1x1024)"
#define BENCHMARK_LENGTH 0
#define SALT_LENGTH 16 /* the max used sized salt */
#define CIPHERTEXT_LENGTH 132 /* max salt+sha512 + 2^32 iterations */
#define BINARY_SIZE 16 /* we cut off all hashes down to 16 bytes */
#define MAX_BINARY_SIZE 64 /* sha512 is 64 byte */
#define SHA1_BINARY_SIZE 20
#define SHA256_BINARY_SIZE 32
#define SHA384_BINARY_SIZE 48
#define SHA512_BINARY_SIZE 64
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct sapH_salt)
#define SALT_ALIGN 4
/* NOTE, format is slow enough that endianity conversion is pointless. Just use flat buffers. */
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define PLAINTEXT_LENGTH 23 /* Real world max. is 40 */
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define PLAINTEXT_LENGTH 40
#endif
/* Self-test vectors: "{x-is<hash>, <iterations>}base64(digest . salt)". */
static struct fmt_tests tests[] = {
/* first 2 hashes are 'default' 1024 iteration with 12 bytes salt so */
/* timings reflect that, and benchmark comment set to (sha1, 1024) */
{"{x-issha, 1024}hmiyJ2a/Z+HRpjQ37Osz+rYax9UxMjM0NTY3ODkwYWI=","OpenWall"},
{"{x-issha, 1024}fRLe9EvN/Le81BDEDZR5SEC0O6BhYmNkZWZnaHVrYWw=","JohnTheRipper"},
{"{x-issha, 1024}L1PHSP1vOwdYh0ASjswI69fQQQhzQXFlWmxnaFA5","booboo"},
{"{x-issha, 1024}dCjaHQ47/WeSwsoSYDR/8puLby5T","booboo"}, /* 1 byte salt */
{"{x-issha, 1024}+q+WSxWXJt7SjV5VJEymEKPUbn1FQWM=","HYulafeE!3"},
{"{x-issha, 6666}7qNFlIR+ZQUpe2DtSBvpvzU5VlBzcG1DVGxvOEFQODI=","dif_iterations"},
{"{x-isSHA256, 3000}UqMnsr5BYN+uornWC7yhGa/Wj0u5tshX19mDUQSlgih6OTFoZjRpMQ==","booboo"},
{"{x-isSHA256, 3000}ydi0JlyU6lX5305Qk/Q3uLBbIFjWuTyGo3tPBZDcGFd6NkFvV1gza3RkNg==","GottaGoWhereNeeded"},
{"{x-isSHA384, 5000}3O/F4YGKNmIYHDu7ZQ7Q+ioCOQi4HRY4yrggKptAU9DtmHigCuGqBiAPVbKbEAfGTzh4YlZLWUM=","booboo"},
{"{x-isSHA384, 5000}XSLo2AKIvACwqW/X416UeVbHOXmio4u27Z7cgXS2rxND+zTpN+x3JNfQcEQX2PT0Z3FPdEY2dHM=","yiPP3rs"},
{"{x-isSHA512, 7500}ctlX6qYsWspafEzwoej6nFp7zRQQjr8y22vE+xeveIX2gUndAw9N2Gep5azNUwuxOe2o7tusF800OfB9tg4taWI4Tg==","booboo"},
{"{x-isSHA512, 7500}Qyrh2JXgGkvIfKYOJRdWFut5/pVnXI/vZvqJ7N+Tz9M1zUTXGWCZSom4az4AhqOuAahBwuhcKqMq/pYPW4h3cThvT2JaWVBw","hapy1CCe!"},
{"{x-isSHA512, 18009}C2+Sij3JyXPPDuQgsF6Zot7XnjRFX86X67tWJpUzXNnFw2dKcGPH6HDEzVJ8HN8+cJe4vZaOYTlmdz09gI7YEwECAwQFBgcICQoLDA0ODwA=","maxlen"},
{NULL}
};
/* Per-candidate buffers, allocated in init(): the plaintext guesses and the
   truncated (16-byte) hashes produced by crypt_all_*(). */
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_key)[BINARY_SIZE/sizeof(uint32_t)];
static struct sapH_salt {
int slen; /* actual length of salt ( 1 to 16 bytes) */
int type; /* 1, 2, 3 or 4 for sha1, sha256, sha384 or sha512 (dispatched on in crypt_all) */
unsigned iter; /* from 1 to 2^32 rounds */
unsigned char s[SALT_LENGTH];
} *sapH_cur_salt;
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
/* Scale the key batch by the OpenMP thread count; the max batch also gets
   OMP_SCALE so each thread has several chunks to load-balance over. */
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
/* Zero-initialized flat arrays, one slot per candidate in a batch. */
saved_plain = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_plain));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
}
/* Release the batch buffers allocated in init(). */
static void done(void)
{
MEM_FREE(crypt_key);
MEM_FREE(saved_plain);
}
/*
 * Accepts a ciphertext if it carries one of the four SAP-H signatures, a
 * decimal iteration count, and a mime-base64 payload that decodes to
 * <digest><salt> with a 1..16 byte salt. Returns 1 on success, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *cp = ciphertext;
char *keeptr;
int len, hash_len=0;
char tmp[MAX_BINARY_SIZE+SALT_LENGTH];
/* first check for 'simple' signatures before allocation other stuff. */
if (!strncmp(cp, FORMAT_TAG, FORMAT_TAG_LEN))
hash_len = SHA1_BINARY_SIZE;
else if (!strncmp(cp, FORMAT_TAG256, FORMAT_TAG256_LEN))
hash_len = SHA256_BINARY_SIZE;
else if (!strncmp(cp, FORMAT_TAG384, FORMAT_TAG384_LEN))
hash_len = SHA384_BINARY_SIZE;
else if (!strncmp(cp, FORMAT_TAG512, FORMAT_TAG512_LEN))
hash_len = SHA512_BINARY_SIZE;
else
return 0;
/* work on a private copy: strtokm() modifies the string it parses */
keeptr = strdup(cp);
cp = keeptr;
while (*cp++ != ' ') ; /* skip the "{x-issha?, " */
/* iteration count: everything up to the closing '}' must be decimal */
if ((cp = strtokm(cp, "}")) == NULL)
goto err;
if (!isdecu(cp))
goto err;
// we want the entire rest of the line here, to mime compare.
if ((cp = strtokm(NULL, "")) == NULL)
goto err;
if (strlen(cp) != base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ|flg_Base64_MIME_TRAIL_EQ_CNT, 0))
goto err;
/* decode; whatever is left after the digest must be a plausible salt */
len = base64_convert(cp, e_b64_mime, strlen(cp), tmp, e_b64_raw,
sizeof(tmp), flg_Base64_MIME_TRAIL_EQ|flg_Base64_DONOT_NULL_TERMINATE, 0);
len -= hash_len;
if (len < 1 || len > SALT_LENGTH)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
/* Called by the cracker core before crypt_all(); just caches the pointer. */
static void set_salt(void *salt)
{
sapH_cur_salt = (struct sapH_salt*)salt;
}
/* Store one candidate; strnzcpyn() bounds the copy and NUL-terminates. */
static void set_key(char *key, int index)
{
strnzcpyn(saved_plain[index], key, sizeof(*saved_plain));
}
static char *get_key(int index)
{
	/* The stored candidate is already NUL-terminated by set_key(). */
	return saved_plain[index];
}
static int cmp_all(void *binary, int count)
{
	/* Quick scan: compare only the first 32 bits of the reference binary
	   against every computed hash; cmp_one() does the full check. */
	const uint32_t needle = *(uint32_t*)binary;
	int i;

	for (i = 0; i < count; ++i) {
		if (crypt_key[i][0] == needle)
			return 1;
	}
	return 0;
}
static int cmp_exact(char *source, int index)
{
/* Only BINARY_SIZE (16) bytes of each digest are kept, and cmp_one() has
   already matched all of them, so there is nothing further to verify. */
return 1;
}
/* Full 16-byte comparison, after cmp_all()'s 32-bit prefilter. */
static int cmp_one(void * binary, int index)
{
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
/*
 * SAP CODVN H, SHA-1 variant:
 *   d = sha1(pass . salt); then (iter-1) times: d = sha1(pass . d)
 * Only BINARY_SIZE (16) bytes of the final digest are stored in crypt_key.
 */
static void crypt_all_1(int count) {
int idx=0;
#if defined(_OPENMP)
#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)
#endif
for (idx = 0; idx < count; idx += NBKEYS1)
{
SHA_CTX ctx;
uint32_t i;
#if !defined (SIMD_COEF_32)
/* Scalar path: tmp = pass . digest; each round rehashes tmp in place. */
uint32_t len = strlen(saved_plain[idx]);
unsigned char tmp[PLAINTEXT_LENGTH+SHA1_BINARY_SIZE], *cp=&tmp[len];
SHA1_Init(&ctx);
SHA1_Update(&ctx, saved_plain[idx], len);
SHA1_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
strcpy((char*)tmp, saved_plain[idx]);
len += SHA1_BINARY_SIZE;
SHA1_Final(cp, &ctx);
for (i = 1; i < sapH_cur_salt->iter; ++i) {
SHA1_Init(&ctx);
SHA1_Update(&ctx, tmp, len);
SHA1_Final(cp, &ctx);
}
memcpy(crypt_key[idx], cp, BINARY_SIZE);
#else
/* SIMD path: one flat 64-byte SHA-1 block per key. The first (salted)
   round is done with plain SHA-1; the iteration loop then keeps
   pass . digest . 0x80 padding and the bit length in each block and
   writes each new digest back at offs[k] (byte-swapped for FLAT_IN). */
unsigned char _IBuf[64*NBKEYS1+MEM_ALIGN_SIMD], *keys, tmpBuf[20], _OBuf[20*NBKEYS1+MEM_ALIGN_SIMD], *crypt;
uint32_t j, *crypt32, offs[NBKEYS1], len;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);
crypt32 = (uint32_t*)crypt;
memset(keys, 0, 64*NBKEYS1);
for (i = 0; i < NBKEYS1; ++i) {
len = strlen(saved_plain[idx+i]);
SHA1_Init(&ctx);
SHA1_Update(&ctx, saved_plain[idx+i], len);
SHA1_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
SHA1_Final(tmpBuf, &ctx);
memcpy(&keys[i<<6], saved_plain[idx+i], len);
memcpy(&keys[(i<<6)+len], tmpBuf, 20);
keys[(i<<6)+len+20] = 0x80;
offs[i] = len;
len += 20;
keys[(i<<6)+60] = (len<<3)&0xff;
keys[(i<<6)+61] = (len>>5);
}
for (i = 1; i < sapH_cur_salt->iter; ++i) {
uint32_t k;
SIMDSHA1body(keys, crypt32, NULL, SSEi_FLAT_IN);
for (k = 0; k < NBKEYS1; ++k) {
uint32_t *pcrypt = &crypt32[ ((k/SIMD_COEF_32)*(SIMD_COEF_32*5)) + (k&(SIMD_COEF_32-1))];
uint32_t *Icp32 = (uint32_t *)(&keys[(k<<6)+offs[k]]);
for (j = 0; j < 5; ++j) {
// likely location for BE porting
#if ARCH_ALLOWS_UNALIGNED
Icp32[j] = JOHNSWAP(*pcrypt);
#else
uint32_t tmp = JOHNSWAP(*pcrypt);
memcpy(&Icp32[j], &tmp, 4);
#endif
pcrypt += SIMD_COEF_32;
}
}
}
// now marshal into crypt_out;
for (i = 0; i < NBKEYS1; ++i) {
uint32_t *Optr32 = (uint32_t*)(crypt_key[idx+i]);
uint32_t *Iptr32 = &crypt32[ ((i/SIMD_COEF_32)*(SIMD_COEF_32*5)) + (i&(SIMD_COEF_32-1))];
// we only want 16 bytes, not 20
for (j = 0; j < 4; ++j) {
Optr32[j] = JOHNSWAP(*Iptr32);
Iptr32 += SIMD_COEF_32;
}
}
#endif
}
}
/*
 * SAP CODVN H, SHA-256 variant — same scheme as crypt_all_1() with a
 * 32-byte digest: d = sha256(pass . salt), then d = sha256(pass . d).
 */
static void crypt_all_256(int count) {
int idx;
#if defined(_OPENMP)
#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)
#endif
for (idx = 0; idx < count; idx += NBKEYS256) {
SHA256_CTX ctx;
uint32_t i;
#if !defined (SIMD_COEF_32)
/* Scalar path: tmp = pass . digest, rehashed in place each round. */
uint32_t len = strlen(saved_plain[idx]);
unsigned char tmp[PLAINTEXT_LENGTH+SHA256_BINARY_SIZE], *cp=&tmp[len];
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_plain[idx], len);
SHA256_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
strcpy((char*)tmp, saved_plain[idx]);
len += SHA256_BINARY_SIZE;
SHA256_Final(cp, &ctx);
for (i = 1; i < sapH_cur_salt->iter; ++i) {
SHA256_Init(&ctx);
SHA256_Update(&ctx, tmp, len);
SHA256_Final(cp, &ctx);
}
memcpy(crypt_key[idx], cp, BINARY_SIZE);
#else
/* SIMD path: flat 64-byte SHA-256 blocks, digest written back at
   offs[k] each round (byte-swapped for the FLAT_IN layout). */
unsigned char _IBuf[64*NBKEYS256+MEM_ALIGN_SIMD], *keys, tmpBuf[32], _OBuf[32*NBKEYS256+MEM_ALIGN_SIMD], *crypt;
uint32_t j, *crypt32, offs[NBKEYS256], len;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);
crypt32 = (uint32_t*)crypt;
memset(keys, 0, 64*NBKEYS256);
for (i = 0; i < NBKEYS256; ++i) {
len = strlen(saved_plain[idx+i]);
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_plain[idx+i], len);
SHA256_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
SHA256_Final(tmpBuf, &ctx);
memcpy(&keys[i<<6], saved_plain[idx+i], len);
memcpy(&keys[(i<<6)+len], tmpBuf, 32);
keys[(i<<6)+len+32] = 0x80;
offs[i] = len;
len += 32;
keys[(i<<6)+60] = (len<<3)&0xff;
keys[(i<<6)+61] = (len>>5);
}
for (i = 1; i < sapH_cur_salt->iter; ++i) {
uint32_t k;
SIMDSHA256body(keys, crypt32, NULL, SSEi_FLAT_IN);
for (k = 0; k < NBKEYS256; ++k) {
uint32_t *pcrypt = &crypt32[ ((k/SIMD_COEF_32)*(SIMD_COEF_32*8)) + (k&(SIMD_COEF_32-1))];
uint32_t *Icp32 = (uint32_t *)(&keys[(k<<6)+offs[k]]);
for (j = 0; j < 8; ++j) {
#if ARCH_ALLOWS_UNALIGNED
Icp32[j] = JOHNSWAP(*pcrypt);
#else
uint32_t tmp = JOHNSWAP(*pcrypt);
memcpy(&Icp32[j], &tmp, 4);
#endif
pcrypt += SIMD_COEF_32;
}
}
}
// now marshal into crypt_out;
for (i = 0; i < NBKEYS256; ++i) {
uint32_t *Optr32 = (uint32_t*)(crypt_key[idx+i]);
uint32_t *Iptr32 = &crypt32[ ((i/SIMD_COEF_32)*(SIMD_COEF_32*8)) + (i&(SIMD_COEF_32-1))];
// we only want 16 bytes, not 32
for (j = 0; j < 4; ++j) {
#if ARCH_ALLOWS_UNALIGNED
Optr32[j] = JOHNSWAP(*Iptr32);
#else
uint32_t tmp = JOHNSWAP(*Iptr32);
memcpy(&Optr32[j], &tmp, 4);
#endif
Iptr32 += SIMD_COEF_32;
}
}
#endif
}
}
/*
 * SAP CODVN H, SHA-384 variant. SHA-384 shares the SHA-512 machinery
 * (SHA512_CTX, 128-byte blocks, SSEi_CRYPT_SHA384 flag) with a truncated
 * 48-byte digest (6 x 64-bit words written back each round).
 */
static void crypt_all_384(int count) {
int idx;
#if defined(_OPENMP)
#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)
#endif
for (idx = 0; idx < count; idx+=NBKEYS512) {
SHA512_CTX ctx;
uint32_t i;
#if !defined SIMD_COEF_64
/* Scalar path: tmp = pass . digest, rehashed in place each round. */
uint32_t len = strlen(saved_plain[idx]);
unsigned char tmp[PLAINTEXT_LENGTH+SHA384_BINARY_SIZE], *cp=&tmp[len];
SHA384_Init(&ctx);
SHA384_Update(&ctx, saved_plain[idx], len);
SHA384_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
strcpy((char*)tmp, saved_plain[idx]);
len += SHA384_BINARY_SIZE;
SHA384_Final(cp, &ctx);
for (i = 1; i < sapH_cur_salt->iter; ++i) {
SHA384_Init(&ctx);
SHA384_Update(&ctx, tmp, len);
SHA384_Final(cp, &ctx);
}
memcpy(crypt_key[idx], cp, BINARY_SIZE);
#else
/* SIMD path: flat 128-byte blocks, 48-byte digest re-injected at
   offs[k] each round (byte-swapped for the FLAT_IN layout). */
unsigned char _IBuf[128*NBKEYS512+MEM_ALIGN_SIMD], *keys, tmpBuf[64], _OBuf[64*NBKEYS512+MEM_ALIGN_SIMD], *crypt;
uint64_t j, *crypt64, offs[NBKEYS512];
uint32_t len;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);
crypt64 = (uint64_t*)crypt;
memset(keys, 0, 128*NBKEYS512);
for (i = 0; i < NBKEYS512; ++i) {
len = strlen(saved_plain[idx+i]);
SHA384_Init(&ctx);
SHA384_Update(&ctx, saved_plain[idx+i], len);
SHA384_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
SHA384_Final(tmpBuf, &ctx);
memcpy(&keys[i<<7], saved_plain[idx+i], len);
memcpy(&keys[(i<<7)+len], tmpBuf, 48);
keys[(i<<7)+len+48] = 0x80;
offs[i] = len;
len += 48;
keys[(i<<7)+120] = (len<<3)&0xff;
keys[(i<<7)+121] = (len>>5);
}
for (i = 1; i < sapH_cur_salt->iter; ++i) {
uint32_t k;
SIMDSHA512body(keys, crypt64, NULL, SSEi_FLAT_IN|SSEi_CRYPT_SHA384);
for (k = 0; k < NBKEYS512; ++k) {
uint64_t *pcrypt = &crypt64[ ((k/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (k&(SIMD_COEF_64-1))];
uint64_t *Icp64 = (uint64_t *)(&keys[(k<<7)+offs[k]]);
for (j = 0; j < 6; ++j) {
#if ARCH_ALLOWS_UNALIGNED
Icp64[j] = JOHNSWAP64(*pcrypt);
#else
uint64_t tmp = JOHNSWAP64(*pcrypt);
memcpy(&Icp64[j], &tmp, 8);
#endif
pcrypt += SIMD_COEF_64;
}
}
}
// now marshal into crypt_out;
for (i = 0; i < NBKEYS512; ++i) {
uint64_t *Optr64 = (uint64_t*)(crypt_key[idx+i]);
uint64_t *Iptr64 = &crypt64[ ((i/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (i&(SIMD_COEF_64-1))];
// we only want 16 bytes, not 48
for (j = 0; j < 2; ++j) {
Optr64[j] = JOHNSWAP64(*Iptr64);
Iptr64 += SIMD_COEF_64;
}
}
#endif
}
}
/*
 * SAP CODVN H, SHA-512 variant — same scheme with the full 64-byte digest
 * (8 x 64-bit words re-injected each round).
 */
static void crypt_all_512(int count) {
int idx;
#if defined(_OPENMP)
#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)
#endif
for (idx = 0; idx < count; idx+=NBKEYS512) {
SHA512_CTX ctx;
uint32_t i;
#if !defined SIMD_COEF_64
/* Scalar path: tmp = pass . digest, rehashed in place each round. */
uint32_t len = strlen(saved_plain[idx]);
unsigned char tmp[PLAINTEXT_LENGTH+SHA512_BINARY_SIZE], *cp=&tmp[len];
SHA512_Init(&ctx);
SHA512_Update(&ctx, saved_plain[idx], len);
SHA512_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
strcpy((char*)tmp, saved_plain[idx]);
len += SHA512_BINARY_SIZE;
SHA512_Final(cp, &ctx);
for (i = 1; i < sapH_cur_salt->iter; ++i) {
SHA512_Init(&ctx);
SHA512_Update(&ctx, tmp, len);
SHA512_Final(cp, &ctx);
}
memcpy(crypt_key[idx], cp, BINARY_SIZE);
#else
/* SIMD path: flat 128-byte blocks, 64-byte digest re-injected at
   offs[k] each round (byte-swapped for the FLAT_IN layout). */
unsigned char _IBuf[128*NBKEYS512+MEM_ALIGN_SIMD], *keys, tmpBuf[64], _OBuf[64*NBKEYS512+MEM_ALIGN_SIMD], *crypt;
uint64_t j, *crypt64, offs[NBKEYS512];
uint32_t len;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);
crypt64 = (uint64_t*)crypt;
memset(keys, 0, 128*NBKEYS512);
for (i = 0; i < NBKEYS512; ++i) {
len = strlen(saved_plain[idx+i]);
SHA512_Init(&ctx);
SHA512_Update(&ctx, saved_plain[idx+i], len);
SHA512_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
SHA512_Final(tmpBuf, &ctx);
memcpy(&keys[i<<7], saved_plain[idx+i], len);
memcpy(&keys[(i<<7)+len], tmpBuf, 64);
keys[(i<<7)+len+64] = 0x80;
offs[i] = len;
len += 64;
keys[(i<<7)+120] = (len<<3)&0xff;
keys[(i<<7)+121] = (len>>5);
}
for (i = 1; i < sapH_cur_salt->iter; ++i) {
uint32_t k;
SIMDSHA512body(keys, crypt64, NULL, SSEi_FLAT_IN);
for (k = 0; k < NBKEYS512; ++k) {
uint64_t *pcrypt = &crypt64[ ((k/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (k&(SIMD_COEF_64-1))];
uint64_t *Icp64 = (uint64_t *)(&keys[(k<<7)+offs[k]]);
for (j = 0; j < 8; ++j) {
#if ARCH_ALLOWS_UNALIGNED
Icp64[j] = JOHNSWAP64(*pcrypt);
#else
uint64_t tmp = JOHNSWAP64(*pcrypt);
memcpy(&Icp64[j], &tmp, 8);
#endif
pcrypt += SIMD_COEF_64;
}
}
}
// now marshal into crypt_out;
for (i = 0; i < NBKEYS512; ++i) {
uint64_t *Optr64 = (uint64_t*)(crypt_key[idx+i]);
uint64_t *Iptr64 = &crypt64[((i/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (i&(SIMD_COEF_64-1))];
// we only want 16 bytes, not 64
for (j = 0; j < 2; ++j) {
Optr64[j] = JOHNSWAP64(*Iptr64);
Iptr64 += SIMD_COEF_64;
}
}
#endif
}
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
	/*
	 * Dispatch to the per-digest worker for the current salt's hash type.
	 * The work was split into 4 separate functions to keep the logic
	 * simple once OMP + SIMD code was added.
	 */
	const int count = *pcount;

	if (sapH_cur_salt->type == 1)
		crypt_all_1(count);
	else if (sapH_cur_salt->type == 2)
		crypt_all_256(count);
	else if (sapH_cur_salt->type == 3)
		crypt_all_384(count);
	else if (sapH_cur_salt->type == 4)
		crypt_all_512(count);

	return count;
}
/*
 * Decode the base64 payload of a canonical ciphertext into raw bytes.
 * Only the leading BINARY_SIZE bytes are kept (the payload is digest||salt,
 * and only a prefix of the digest is compared).
 */
static void *get_binary(char *ciphertext)
{
static union {
unsigned char cp[BINARY_SIZE]; /* only stores part the size of each hash */
uint32_t jnk[BINARY_SIZE/4];   /* forces 4-byte alignment of cp */
} b;
char *cp = ciphertext;
memset(b.cp, 0, sizeof(b.cp));
/* skip the signature tag; which tag tells us the hash flavour */
if (!strncasecmp(cp, FORMAT_TAG, FORMAT_TAG_LEN)) { cp += FORMAT_TAG_LEN; }
else if (!strncasecmp(cp, FORMAT_TAG256, FORMAT_TAG256_LEN)) { cp += FORMAT_TAG256_LEN; }
else if (!strncasecmp(cp, FORMAT_TAG384, FORMAT_TAG384_LEN)) { cp += FORMAT_TAG384_LEN; }
else if (!strncasecmp(cp, FORMAT_TAG512, FORMAT_TAG512_LEN)) { cp += FORMAT_TAG512_LEN; }
else { fprintf(stderr, "error, bad signature in sap-H format!\n"); error(); }
/* skip the iteration count, up to and past the closing '}' */
while (*cp != '}') ++cp;
++cp;
/* decode; DONOT_NULL_TERMINATE because b.cp is sized exactly BINARY_SIZE */
base64_convert(cp, e_b64_mime, strlen(cp), b.cp, e_b64_raw,
BINARY_SIZE, flg_Base64_MIME_TRAIL_EQ|flg_Base64_DONOT_NULL_TERMINATE, 0);
return b.cp;
}
/*
 * Parse a ciphertext into the static sapH_salt: hash type, iteration count,
 * and the raw salt bytes that follow the digest in the base64 payload.
 */
static void *get_salt(char *ciphertext)
{
static struct sapH_salt s;
char *cp = ciphertext;
unsigned char tmp[MAX_BINARY_SIZE+SALT_LENGTH];
int total_len, hash_len = 0;
/* zero the whole struct so salt_hash() sees deterministic padding bytes */
memset(&s, 0, sizeof(s));
/* the tag fixes both the type id and the digest size inside the payload */
if (!strncasecmp(cp, FORMAT_TAG, FORMAT_TAG_LEN)) { s.type = 1; cp += FORMAT_TAG_LEN; hash_len = SHA1_BINARY_SIZE; }
else if (!strncasecmp(cp, FORMAT_TAG256, FORMAT_TAG256_LEN)) { s.type = 2; cp += FORMAT_TAG256_LEN; hash_len = SHA256_BINARY_SIZE; }
else if (!strncasecmp(cp, FORMAT_TAG384, FORMAT_TAG384_LEN)) { s.type = 3; cp += FORMAT_TAG384_LEN; hash_len = SHA384_BINARY_SIZE; }
else if (!strncasecmp(cp, FORMAT_TAG512, FORMAT_TAG512_LEN)) { s.type = 4; cp += FORMAT_TAG512_LEN; hash_len = SHA512_BINARY_SIZE; }
else { fprintf(stderr, "error, bad signature in sap-H format!\n"); error(); }
/* iteration count sits right after the tag, before the closing '}' */
sscanf(cp, "%u", &s.iter);
while (*cp != '}') ++cp;
++cp;
/* payload is digest||salt; the salt is whatever follows the digest */
total_len = base64_convert(cp, e_b64_mime, strlen(cp), tmp, e_b64_raw,
sizeof(tmp), flg_Base64_MIME_TRAIL_EQ|flg_Base64_DONOT_NULL_TERMINATE, 0);
s.slen = total_len-hash_len;
memcpy(s.s, &tmp[hash_len], s.slen);
return &s;
}
/* Canonicalization hook: currently a no-op passthrough. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
/* We 'could' case-switch the SHA/sha tag and unify case. If they can vary, we will have to. */
return ciphertext;
}
#define COMMON_GET_HASH_VAR crypt_key
#include "common-get-hash.h"
/*
 * djb2-style hash (xor variant) over the raw bytes of the salt struct.
 * get_salt() memsets the struct first, so unused bytes hash consistently.
 */
static int salt_hash(void *salt)
{
	const unsigned char *bytes = (const unsigned char*)salt;
	unsigned int h = 5381;
	unsigned int n = sizeof(struct sapH_salt);

	while (n--)
		h = h * 33 ^ *bytes++;
	return h & (SALT_HASH_SIZE - 1);
}
/* Tunable-cost reporter: hash type (1:SHA1 2:SHA256 3:SHA384 4:SHA512). */
static unsigned int sapH_type(void *salt)
{
	return ((struct sapH_salt *)salt)->type;
}
/* Tunable-cost reporter: number of hash iterations for this salt. */
static unsigned int iteration_count(void *salt)
{
	return ((struct sapH_salt *)salt)->iter;
}
/* Format descriptor registered with the John the Ripper core. */
struct fmt_main fmt_sapH = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_OMP | FMT_CASE | FMT_8_BIT | FMT_UTF8,
/* tunable-cost names, matching the cost callbacks below */
{
"hash type [1:SHA1 2:SHA256 3:SHA384 4:SHA512]",
"iteration count",
},
{ FORMAT_TAG, FORMAT_TAG256, FORMAT_TAG384, FORMAT_TAG512 },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
get_salt,
/* tunable-cost value extractors */
{
sapH_type,
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
/* get_hash[] implementations come from common-get-hash.h */
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
snap_red.c | #include <stdio.h>
#define N 10
/*
 * Reduced OpenMP offload test: three levels of parallelism
 * (target teams distribute / parallel for collapse(2) / parallel for
 * reduction). Expected total: nxyz*ng*(cmom-1)*sum(0..9)
 * = 5000*12*13*45 = 35100000.
 * NOTE: the exact pragma/statement layout is the point of this
 * reproducer; see the comment about num_teams(149) below.
 */
int main (void)
{
long int aa=0;
int res = 0;   /* NOTE(review): unused; kept from the original reproducer */
int ng =12;
int cmom = 14;
int nxyz = 5000;
// fails for 149 and above: nxyz=149;
#pragma omp target teams distribute num_teams(149) thread_limit(ng*(cmom-1)) map(tofrom:aa)
for (int gid = 0; gid < nxyz; gid++) {
// int bb=0;
#pragma omp parallel for collapse(2)
for (unsigned int g = 0; g < ng; g++) {
for (unsigned int l = 0; l < cmom-1; l++) {
/* innermost reduction computes sum(0..N-1) = 45 */
int a = 0;
#pragma omp parallel for reduction(+:a)
for (int i = 0; i < N; i++) {
a += i;
}
/* atomic accumulate into the mapped global result */
#pragma omp atomic
aa += a;
}
}
// #pragma omp atomic
//aa += bb;
}
printf ("The result is = %ld!\n", aa);
if (aa != 35100000) {
printf("Failed\n");
return 1;
}
return 0;
}
|
residualbased_block_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Collaborators: Vicente Mataix
//
//
#if !defined(KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER
/* System includes */
#include <unordered_set>
/* External includes */
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "includes/key_hash.h"
#include "utilities/timer.h"
#include "utilities/openmp_utils.h"
#include "includes/kratos_flags.h"
#include "includes/lock_object.h"
#include "utilities/sparse_matrix_multiplication_utility.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedEliminationBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedBlockBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedBlockBuilderAndSolver);
/// Definition of the base class
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
// The size_t types
typedef std::size_t SizeType;
typedef std::size_t IndexType;
/// Definition of the classes from the base class
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
/// Additional definitions
typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType;
typedef Element::EquationIdVectorType EquationIdVectorType;
typedef Element::DofsVectorType DofsVectorType;
typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType;
/// DoF types definition
typedef Node<3> NodeType;
typedef typename NodeType::DofType DofType;
typedef typename DofType::Pointer DofPointerType;
///@}
///@name Life Cycle
///@{
/**
 * @brief Constructor with Parameters: validates the settings against the
 * defaults before delegating to the base class.
 * @param pNewLinearSystemSolver The linear solver to be used
 * @param ThisParameters The configuration parameters (validated in place)
 */
explicit ResidualBasedBlockBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver,
Parameters ThisParameters
) : BaseType(pNewLinearSystemSolver)
{
// Validate default parameters (only "name" is currently configurable)
Parameters default_parameters = Parameters(R"(
{
"name" : "ResidualBasedBlockBuilderAndSolver"
})" );
ThisParameters.ValidateAndAssignDefaults(default_parameters);
}
/**
 * @brief Default constructor: takes only the linear solver, no parameter
 * validation is performed.
 * @param pNewLinearSystemSolver The linear solver to be used
 */
explicit ResidualBasedBlockBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BaseType(pNewLinearSystemSolver)
{
}
/// Destructor: no resources beyond the members, so the default suffices.
~ResidualBasedBlockBuilderAndSolver() override = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
 * @brief Function to perform the build of the RHS. The vector could be sized as the total number
 * of dofs or as the number of unrestrained ones
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param b The RHS vector
 */
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& b) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
// Getting the elements from the model
const int nelements = static_cast<int>(rModelPart.Elements().size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements
double start_build = OpenMPUtils::GetCurrentTime();
// Each thread owns its local contribution buffers (firstprivate copies);
// Assemble() is expected to make the global writes safe -- confirm there.
#pragma omp parallel firstprivate(nelements,nconditions, LHS_Contribution, RHS_Contribution, EquationId )
{
# pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; k++)
{
ModelPart::ElementsContainerType::iterator it = el_begin + k;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool element_is_active = true;
if ((it)->IsDefined(ACTIVE))
element_is_active = (it)->Is(ACTIVE);
if (element_is_active)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
// clean local elemental memory
pScheme->CleanMemory(*(it.base()));
}
}
// no nowait here: all element work must finish before the timer/loop below ends
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; k++)
{
ModelPart::ConditionsContainerType::iterator it = cond_begin + k;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool condition_is_active = true;
if ((it)->IsDefined(ACTIVE))
condition_is_active = (it)->Is(ACTIVE);
if (condition_is_active)
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
// clean local elemental memory
pScheme->CleanMemory(*(it.base()));
}
}
}
const double stop_build = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl;
KRATOS_CATCH("")
}
/**
 * @brief Builds only the LHS matrix.
 * @details Delegates to Build() with a throw-away RHS vector, so the
 * assembled matrix is identical to the one produced by a full build.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 */
void BuildLHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A) override
{
    KRATOS_TRY

    // RHS contributions are assembled into a dummy vector and discarded
    TSystemVectorType dummy_rhs(A.size1(), 0.0);
    this->Build(pScheme, rModelPart, A, dummy_rhs);

    KRATOS_CATCH("")
}
/**
 * @brief Builds the LHS "complete on free rows".
 * @details In the block builder the whole system is kept, so this simply
 * delegates to Build() with a throw-away RHS vector (same as BuildLHS).
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 */
void BuildLHS_CompleteOnFreeRows(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A) override
{
    KRATOS_TRY

    // RHS contributions are assembled into a dummy vector and discarded
    TSystemVectorType dummy_rhs(A.size1(), 0.0);
    this->Build(pScheme, rModelPart, A, dummy_rhs);

    KRATOS_CATCH("")
}
/**
 * @brief This is a call to the linear system solver
 * @details A zero RHS short-circuits the solver; when master-slave
 * constraints are present the condensed solution is mapped back with mT.
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void SystemSolve(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b
    ) override
{
    KRATOS_TRY

    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (norm_b != 0.00) {
        // Do solve
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    } else {
        // Zero RHS -> zero update, no solver call needed
        TSparseSpace::SetToZero(Dx);
    }

    // If there are master-slave constraints, recover the solution of the
    // original problem: Dx = T * Dx_condensed
    if (mT.size1() != 0) {
        TSystemVectorType Dxmodified = Dx;
        TSparseSpace::Mult(mT, Dxmodified, Dx);
    }

    // Prints information about the current time
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
 * @brief Solve wrapper that accounts for master-slave constraints: solves the
 * condensed system, then maps the result back through the relation matrix mT.
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void SystemSolveWithPhysics(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b,
    ModelPart& rModelPart
    )
{
    if (rModelPart.MasterSlaveConstraints().size() == 0) {
        // Unconstrained problem: solve directly into Dx
        InternalSystemSolveWithPhysics(A, Dx, b, rModelPart);
        return;
    }

    // Solve for the condensed unknowns, then recover the original ones
    TSystemVectorType Dxmodified(b.size());
    InternalSystemSolveWithPhysics(A, Dxmodified, b, rModelPart);
    TSparseSpace::Mult(mT, Dxmodified, Dx);
}
/**
 * @brief This is a call to the linear system solver (taking into account some
 * physical particularities of the problem)
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void InternalSystemSolveWithPhysics(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY

    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (norm_b == 0.00) {
        // Zero RHS -> zero update, warn and skip the solver
        TSparseSpace::SetToZero(Dx);
        KRATOS_WARNING("ResidualBasedBlockBuilderAndSolver") << "ATTENTION! setting the RHS to zero!" << std::endl;
    } else {
        // Let the solver inspect the problem first if it asks for extra data
        if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);

        // Do solve
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }

    // Prints information about the current time
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
 * @brief Function to perform the building and solving phase at the same time.
 * @details It is ideally the fastest and safer function to use when it is possible to solve
 * just after building. Order: build, condense constraints (if any), apply
 * Dirichlet BCs, solve.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
Timer::Start("Build");
Build(pScheme, rModelPart, A, b);
Timer::Stop("Build");
// Condense the system with the master-slave relation matrix, if needed
if(rModelPart.MasterSlaveConstraints().size() != 0) {
Timer::Start("ApplyConstraints");
ApplyConstraints(pScheme, rModelPart, A, b);
Timer::Stop("ApplyConstraints");
}
// Fixed dofs are imposed by zeroing their rows/columns (block strategy)
ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
const double start_solve = OpenMPUtils::GetCurrentTime();
Timer::Start("Solve");
SystemSolveWithPhysics(A, Dx, b, rModelPart);
Timer::Stop("Solve");
const double stop_solve = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
KRATOS_CATCH("")
}
/**
 * @brief Corresponds to the previous method, but the system matrix is
 * considered already built: only the RHS is rebuilt before solving.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void BuildRHSAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY

    // Reassemble the residual only, then reuse the existing LHS
    BuildRHS(pScheme, rModelPart, b);
    SystemSolve(A, Dx, b);

    KRATOS_CATCH("")
}
/**
 * @brief Function to perform the build of the RHS.
 * @details Assembles the unconstrained residual, then zeroes the entries
 * belonging to fixed dofs (block strategy: fixed dofs keep their rows).
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param b The RHS vector
 */
void BuildRHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& b) override
{
    KRATOS_TRY

    BuildRHSNoDirichlet(pScheme,rModelPart,b);

    // NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    const int ndofs = static_cast<int>(BaseType::mDofSet.size());

    #pragma omp parallel for firstprivate(ndofs)
    for (int k = 0; k < ndofs; ++k) {
        const auto it_dof = BaseType::mDofSet.begin() + k;
        if (it_dof->IsFixed()) {
            b[it_dof->EquationId()] = 0.0; // residual of a fixed dof is irrelevant
        }
    }

    KRATOS_CATCH("")
}
/**
 * @brief Builds the list of the DofSets involved in the problem by "asking" to each element
 * and condition its Dofs.
 * @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
 * way the matrix and RHS are built. Per-thread unordered sets are filled in
 * parallel and merged under a critical section, then sorted into mDofSet.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 */
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
) override
{
KRATOS_TRY;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType& r_elements_array = rModelPart.Elements();
const int number_of_elements = static_cast<int>(r_elements_array.size());
DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations
unsigned int nthreads = OpenMPUtils::GetNumThreads();
typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of threads" << nthreads << "\n" << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl;
/**
 * Here we declare three sets.
 * - The global set: Contains all the DoF of the system
 * - The slave set: The DoF that are not going to be solved, due to MPC formulation
 */
set_type dof_global_set;
// heuristic pre-size: ~20 dofs per element
dof_global_set.reserve(number_of_elements*20);
// dof_list/second_dof_list are firstprivate scratch buffers per thread
#pragma omp parallel firstprivate(dof_list, second_dof_list)
{
ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// We create the temporal set and we reserve some space on them
set_type dofs_tmp_set;
dofs_tmp_set.reserve(20000);
// Gets the array of elements from the modeler
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < number_of_elements; ++i) {
auto it_elem = r_elements_array.begin() + i;
// Gets list of Dof involved on every element
pScheme->GetElementalDofList(*(it_elem.base()), dof_list, r_current_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
}
// Gets the array of conditions from the modeler
ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
const int number_of_conditions = static_cast<int>(r_conditions_array.size());
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < number_of_conditions; ++i) {
auto it_cond = r_conditions_array.begin() + i;
// Gets list of Dof involved on every element
pScheme->GetConditionDofList(*(it_cond.base()), dof_list, r_current_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
}
// Gets the array of constraints from the modeler
auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
const int number_of_constraints = static_cast<int>(r_constraints_array.size());
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < number_of_constraints; ++i) {
auto it_const = r_constraints_array.begin() + i;
// Gets list of Dof involved on every element (slave and master lists)
it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end());
}
// We merge all the sets in one thread
#pragma omp critical
{
dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
}
}
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl;
// Copy the unordered global set into the sorted dof container
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dof_global_set.size());
for (auto it= dof_global_set.begin(); it!= dof_global_set.end(); it++)
{
Doftemp.push_back( *it );
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
//Throws an exception if there are no Degrees Of Freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "End of setup dof set\n" << std::endl;
#ifdef KRATOS_DEBUG
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is to be done only in debug mode
if (BaseType::GetCalculateReactionsFlag()) {
for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
<< "Node : "<<dof_iterator->Id()<< std::endl
<< "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
 * @brief Organises the dofset in order to speed up the building phase.
 * @details Block strategy: every dof (fixed or free) receives an equation id
 * equal to its position in the sorted dof set.
 * @param rModelPart The model part of the problem to solve
 */
void SetUpSystem(
    ModelPart& rModelPart
    ) override
{
    BaseType::mEquationSystemSize = BaseType::mDofSet.size();
    const int ndofs = static_cast<int>(BaseType::mDofSet.size());

    #pragma omp parallel for firstprivate(ndofs)
    for (int i = 0; i < ndofs; ++i) {
        (BaseType::mDofSet.begin() + i)->SetEquationId(i);
    }
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Resizes and initializes the system matrix and vectors, (re)building
 * the matrix graph when required.
 * @param pScheme The integration scheme considered
 * @param pA Pointer to the LHS matrix (allocated here if null)
 * @param pDx Pointer to the unknowns vector (allocated here if null)
 * @param pb Pointer to the RHS vector (allocated here if null)
 * @param rModelPart The model part of the problem to solve
 */
void ResizeAndInitializeVectors(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixPointerType& pA,
    TSystemVectorPointerType& pDx,
    TSystemVectorPointerType& pb,
    ModelPart& rModelPart
    ) override
{
    KRATOS_TRY

    if (pA == nullptr) { // if the pointer is not initialized initialize it to an empty matrix
        TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
        pA.swap(pNewA);
    }
    if (pDx == nullptr) { // if the pointer is not initialized initialize it to an empty vector
        TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
        pDx.swap(pNewDx);
    }
    if (pb == nullptr) { // if the pointer is not initialized initialize it to an empty vector
        TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
        pb.swap(pNewb);
    }

    TSystemMatrixType& A = *pA;
    TSystemVectorType& Dx = *pDx;
    TSystemVectorType& b = *pb;

    // Resizing the system vectors and matrix
    if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) { // if the matrix is not initialized
        A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        ConstructMatrixStructure(pScheme, A, rModelPart);
    } else {
        // NOTE: the resize/graph-reconstruction that previously followed this
        // error was unreachable (KRATOS_ERROR throws) and has been removed
        KRATOS_ERROR_IF(A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
            << "The equation system size has changed during the simulation. This is not permited." << std::endl;
    }
    if (Dx.size() != BaseType::mEquationSystemSize)
        Dx.resize(BaseType::mEquationSystemSize, false);
    if (b.size() != BaseType::mEquationSystemSize)
        b.resize(BaseType::mEquationSystemSize, false);

    // Also (re)build the sparsity pattern of the constraint relation matrix
    ConstructMasterSlaveConstraintsStructure(rModelPart);

    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Step initialization: delegates to the base class and then lets every
 * master-slave constraint initialize itself (each constraint constructs and
 * stores its T and C matrices and its slave equation ids there).
 */
void InitializeSolutionStep(
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb) override
{
    KRATOS_TRY

    BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);

    const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
    auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();

    #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
    for (int i_const = 0; i_const < n_constraints; ++i_const) {
        (constraints_begin + i_const)->InitializeSolutionStep(r_process_info);
    }

    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Step finalization: delegates to the base class and then finalizes
 * every master-slave constraint in parallel.
 */
void FinalizeSolutionStep(
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb) override
{
    BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb);

    const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
    const auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();

    #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
    for (int i_const = 0; i_const < n_constraints; ++i_const) {
        (constraints_begin + i_const)->FinalizeSolutionStep(r_process_info);
    }
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Computes the reactions: rebuilds the unconstrained RHS and stores
 * -RHS[i] as the reaction value of each dof.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix (unused here, kept for interface compatibility)
 * @param Dx The Unknowns vector (unused here)
 * @param b The RHS vector (overwritten with the fresh residual)
 */
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    TSparseSpace::SetToZero(b);

    // Refresh RHS to have the correct reactions
    BuildRHSNoDirichlet(pScheme, rModelPart, b);

    // NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    const int ndofs = static_cast<int>(BaseType::mDofSet.size());

    #pragma omp parallel for firstprivate(ndofs)
    for (int k = 0; k < ndofs; k++) {
        typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;

        // Use the unsigned equation id directly (consistent with BuildRHS);
        // the previous int cast could narrow on very large systems
        const std::size_t i = dof_iterator->EquationId();
        dof_iterator->GetSolutionStepReactionValue() = -b[i];
    }
}
/**
 * @brief Applies the dirichlet conditions. This operation may be very heavy or completely
 * unexpensive depending on the implementation choosen and on how the System Matrix is built.
 * @details For explanation of how it works for a particular implementation the user
 * should refer to the particular Builder And Solver choosen. Here (block
 * strategy) a fixed dof's row and column are zeroed, keeping the diagonal,
 * so its equation reduces to a_kk * dx_k = 0.
 * NOTE(review): scaling_factors is indexed by the dof's position k, while the
 * matrix rows are indexed by equation id -- this relies on equation ids being
 * the consecutive positions assigned in SetUpSystem.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
std::size_t system_size = A.size1();
// 0.0 -> fixed dof (row/column must be eliminated), 1.0 -> free dof
std::vector<double> scaling_factors (system_size, 0.0);
const int ndofs = static_cast<int>(BaseType::mDofSet.size());
//NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
#pragma omp parallel for firstprivate(ndofs)
for (int k = 0; k<ndofs; k++) {
typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
if(dof_iterator->IsFixed())
scaling_factors[k] = 0.0;
else
scaling_factors[k] = 1.0;
}
// Direct access to the CSR arrays of the ublas compressed matrix
double* Avalues = A.value_data().begin();
std::size_t* Arow_indices = A.index1_data().begin();
std::size_t* Acol_indices = A.index2_data().begin();
//detect if there is a line of all zeros and set the diagonal to a 1 if this happens
#pragma omp parallel for firstprivate(system_size)
for (int k = 0; k < static_cast<int>(system_size); ++k){
std::size_t col_begin = Arow_indices[k];
std::size_t col_end = Arow_indices[k+1];
bool empty = true;
for (std::size_t j = col_begin; j < col_end; ++j)
{
if(Avalues[j] != 0.0)
{
empty = false;
break;
}
}
if(empty == true)
{
// NOTE: A(k,k) on a compressed matrix may insert an entry; presumably the
// diagonal is always present in the graph here -- confirm in ConstructMatrixStructure
A(k,k) = 1.0;
b[k] = 0.0;
}
}
#pragma omp parallel for
for (int k = 0; k < static_cast<int>(system_size); ++k)
{
std::size_t col_begin = Arow_indices[k];
std::size_t col_end = Arow_indices[k+1];
double k_factor = scaling_factors[k];
if (k_factor == 0)
{
// zero out the whole row, except the diagonal
for (std::size_t j = col_begin; j < col_end; ++j)
if (static_cast<int>(Acol_indices[j]) != k )
Avalues[j] = 0.0;
// zero out the RHS
b[k] = 0.0;
}
else
{
// zero out the column which is associated with the zero'ed row
for (std::size_t j = col_begin; j < col_end; ++j)
if(scaling_factors[ Acol_indices[j] ] == 0 )
Avalues[j] = 0.0;
}
}
}
/**
 * @brief Applies the master-slave constraints to the system via the relation
 * matrix T: b <- T^t * b and A <- T^t * A * T, then places a (scaled) unit
 * diagonal on the active slave rows so the reduced system stays solvable.
 * @param pScheme The integration scheme (unused here)
 * @param rModelPart The model part containing the constraints
 * @param rA The LHS matrix (overwritten in place)
 * @param rb The RHS vector (overwritten in place)
 */
void ApplyConstraints(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rb) override
{
    KRATOS_TRY
    if (rModelPart.MasterSlaveConstraints().size() != 0) {
        BuildMasterSlaveConstraints(rModelPart);
        // We compute the transposed matrix of the global relation matrix
        TSystemMatrixType T_transpose_matrix(mT.size2(), mT.size1());
        SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, mT, 1.0);
        TSystemVectorType b_modified(rb.size());
        TSparseSpace::Mult(T_transpose_matrix, rb, b_modified);
        TSparseSpace::Copy(b_modified, rb);
        b_modified.resize(0, false); //free memory
        TSystemMatrixType auxiliar_A_matrix(mT.size2(), rA.size2());
        SparseMatrixMultiplicationUtility::MatrixMultiplication(T_transpose_matrix, rA, auxiliar_A_matrix); //auxiliar = T_transpose * rA
        T_transpose_matrix.resize(0, 0, false); //free memory
        SparseMatrixMultiplicationUtility::MatrixMultiplication(auxiliar_A_matrix, mT, rA); //A = auxilar * T NOTE: here we are overwriting the old A matrix!
        auxiliar_A_matrix.resize(0, 0, false); //free memory
        // Largest diagonal magnitude: used as the stiffness placed on slave rows
        // so they are well-scaled relative to the rest of the system.
        double max_diag = 0.0;
        for(IndexType i = 0; i < rA.size1(); ++i) {
            max_diag = std::max(std::abs(rA(i,i)), max_diag);
        }
        // Apply diagonal values on slaves
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(mSlaveIds.size()); ++i) {
            const IndexType slave_equation_id = mSlaveIds[i];
            // Inactive slaves already received a unit diagonal in
            // BuildMasterSlaveConstraints, so only active ones are touched here.
            if (mInactiveSlaveDofs.find(slave_equation_id) == mInactiveSlaveDofs.end()) {
                rA(slave_equation_id, slave_equation_id) = max_diag;
                rb[slave_equation_id] = 0.0;
            }
        }
    }
    KRATOS_CATCH("")
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
/// Releases every container owned by this builder-and-solver: the base-class
/// storage plus the constraint bookkeeping (relation matrix, constant vector,
/// master/slave id lists and the inactive-slave set).
void Clear() override
{
    BaseType::Clear();
    // Constraint-related storage owned by this class
    mT.resize(0,0,false);
    mConstantVector.resize(0,false);
    mSlaveIds.clear();
    mMasterIds.clear();
    mInactiveSlaveDofs.clear();
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
/**
 * @brief Performs validity checks on the input model part.
 * @details Currently no checks are implemented; the method always succeeds.
 * @param rModelPart The model part of the problem to solve
 * @return 0 all ok
 */
int Check(ModelPart& rModelPart) override
{
    KRATOS_TRY
    return 0;
    KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Turn back information as a string (class identifier used in logs/prints).
std::string Info() const override
{
    return "ResidualBasedBlockBuilderAndSolver";
}
/// Print information about this object.
/// Print information about this object (delegates to Info()).
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << Info();
}
/// Print object's data (currently identical to PrintInfo()).
void PrintData(std::ostream& rOStream) const override
{
    rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
TSystemMatrixType mT; /// This is matrix containing the global relation for the constraints
TSystemVectorType mConstantVector; /// This is vector containing the rigid movement of the constraint
std::vector<IndexType> mSlaveIds; /// The equation ids of the slaves
std::vector<IndexType> mMasterIds; /// The equation ids of the master
std::unordered_set<IndexType> mInactiveSlaveDofs; /// The set containing the inactive slave dofs
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief Builds the sparsity pattern of the constraints relation matrix mT.
 * @details For every active MasterSlaveConstraint the (slave id -> master ids)
 * couplings are collected per thread, merged into a global per-row index set,
 * and the CSR structure of mT (plus the mSlaveIds / mMasterIds partitions and
 * the constant-vector size) is created from it.
 * @param rModelPart The model part containing the constraints
 */
virtual void ConstructMasterSlaveConstraintsStructure(ModelPart& rModelPart)
{
    if (rModelPart.MasterSlaveConstraints().size() > 0) {
        Timer::Start("ConstraintsRelationMatrixStructure"); // BUGFIX: Stop() below had no matching Start()
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        // Vector containing the localization in the system of the different terms
        DofsVectorType slave_dof_list, master_dof_list;
        // Constraint initial iterator
        const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin();
        std::vector<std::unordered_set<IndexType>> indices(BaseType::mDofSet.size());
        std::vector<LockObject> lock_array(indices.size());
        #pragma omp parallel firstprivate(slave_dof_list, master_dof_list)
        {
            Element::EquationIdVectorType slave_ids(3);
            Element::EquationIdVectorType master_ids(3);
            // Thread-local map: slave equation id -> set of coupled master ids
            std::unordered_map<IndexType, std::unordered_set<IndexType>> temp_indices;
            #pragma omp for schedule(guided, 512) nowait
            for (int i_const = 0; i_const < static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i_const) {
                auto it_const = it_const_begin + i_const;
                // Detect if the constraint is active or not. If the user did not make any choice the constraint
                // It is active by default
                bool constraint_is_active = true;
                if( it_const->IsDefined(ACTIVE) ) {
                    constraint_is_active = it_const->Is(ACTIVE);
                }
                if(constraint_is_active) {
                    it_const->EquationIdVector(slave_ids, master_ids, r_current_process_info);
                    // Slave DoFs
                    for (auto &id_i : slave_ids) {
                        temp_indices[id_i].insert(master_ids.begin(), master_ids.end());
                    }
                }
            }
            // Merging all the temporal indexes
            // BUGFIX: iterate the map's actual (slave id -> masters) entries.
            // The previous code indexed temp_indices with a 0..size() counter,
            // which locked/filled the wrong rows and, through operator[],
            // inserted empty entries into the map while traversing it.
            for (auto& r_pair : temp_indices) {
                lock_array[r_pair.first].SetLock();
                indices[r_pair.first].insert(r_pair.second.begin(), r_pair.second.end());
                lock_array[r_pair.first].UnSetLock();
            }
        }
        mSlaveIds.clear();
        mMasterIds.clear();
        for (int i = 0; i < static_cast<int>(indices.size()); ++i) {
            if (indices[i].size() == 0) // Master dof!
                mMasterIds.push_back(i);
            else // Slave dof
                mSlaveIds.push_back(i);
            indices[i].insert(i); // Ensure that the diagonal is there in T
        }
        // Count the row sizes
        std::size_t nnz = 0;
        for (IndexType i = 0; i < indices.size(); ++i)
            nnz += indices[i].size();
        mT = TSystemMatrixType(indices.size(), indices.size(), nnz);
        mConstantVector.resize(indices.size(), false);
        double *Tvalues = mT.value_data().begin();
        IndexType *Trow_indices = mT.index1_data().begin();
        IndexType *Tcol_indices = mT.index2_data().begin();
        // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        Trow_indices[0] = 0;
        for (int i = 0; i < static_cast<int>(mT.size1()); i++)
            Trow_indices[i + 1] = Trow_indices[i] + indices[i].size();
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(mT.size1()); ++i) {
            const IndexType row_begin = Trow_indices[i];
            const IndexType row_end = Trow_indices[i + 1];
            IndexType k = row_begin;
            // Copy the (unsorted) column set into the CSR arrays, then sort the row
            for (auto it = indices[i].begin(); it != indices[i].end(); ++it) {
                Tcol_indices[k] = *it;
                Tvalues[k] = 0.0;
                k++;
            }
            indices[i].clear(); //deallocating the memory
            std::sort(&Tcol_indices[row_begin], &Tcol_indices[row_end]);
        }
        mT.set_filled(indices.size() + 1, nnz);
        Timer::Stop("ConstraintsRelationMatrixStructure");
    }
}
/**
 * @brief Fills the relation matrix mT and the constant vector mConstantVector
 * from the active MasterSlaveConstraints (the sparsity pattern is assumed to
 * have been built by ConstructMasterSlaveConstraintsStructure).
 * @details Inactive constraints only record their slave dofs in
 * mInactiveSlaveDofs; master dofs and inactive slaves get an identity row in T
 * and a zero constant so they pass through unchanged.
 * @param rModelPart The model part containing the constraints
 */
virtual void BuildMasterSlaveConstraints(ModelPart& rModelPart)
{
    KRATOS_TRY
    TSparseSpace::SetToZero(mT);
    TSparseSpace::SetToZero(mConstantVector);
    // The current process info
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
    // Contributions to the system
    // NOTE: the unused slave/master dof-list locals of the previous version were removed.
    Matrix transformation_matrix = LocalSystemMatrixType(0, 0);
    Vector constant_vector = LocalSystemVectorType(0);
    // Vector containing the localization in the system of the different terms
    Element::EquationIdVectorType slave_equation_ids, master_equation_ids;
    const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
    // We clear the set
    mInactiveSlaveDofs.clear();
    #pragma omp parallel firstprivate(transformation_matrix, constant_vector, slave_equation_ids, master_equation_ids)
    {
        std::unordered_set<IndexType> auxiliar_inactive_slave_dofs;
        #pragma omp for schedule(guided, 512)
        for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
            auto it_const = rModelPart.MasterSlaveConstraints().begin() + i_const;
            // Detect if the constraint is active or not. If the user did not make any choice the constraint
            // It is active by default
            bool constraint_is_active = true;
            if (it_const->IsDefined(ACTIVE))
                constraint_is_active = it_const->Is(ACTIVE);
            if (constraint_is_active) {
                it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info);
                it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);
                for (IndexType i = 0; i < slave_equation_ids.size(); ++i) {
                    const IndexType i_global = slave_equation_ids[i];
                    // Assemble matrix row
                    AssembleRowContribution(mT, transformation_matrix, i_global, i, master_equation_ids);
                    // Assemble constant vector (atomic: rows may be shared between threads)
                    const double constant_value = constant_vector[i];
                    double& r_value = mConstantVector[i_global];
                    #pragma omp atomic
                    r_value += constant_value;
                }
            } else { // Taking into account inactive constraints
                it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);
                auxiliar_inactive_slave_dofs.insert(slave_equation_ids.begin(), slave_equation_ids.end());
            }
        }
        // We merge all the sets in one thread
        #pragma omp critical
        {
            mInactiveSlaveDofs.insert(auxiliar_inactive_slave_dofs.begin(), auxiliar_inactive_slave_dofs.end());
        }
    }
    // Setting the master dofs into the T and C system
    for (auto eq_id : mMasterIds) {
        mConstantVector[eq_id] = 0.0;
        mT(eq_id, eq_id) = 1.0;
    }
    // Setting inactive slave dofs in the T and C system
    for (auto eq_id : mInactiveSlaveDofs) {
        mConstantVector[eq_id] = 0.0;
        mT(eq_id, eq_id) = 1.0;
    }
    KRATOS_CATCH("")
}
/**
 * @brief Builds the CSR sparsity pattern of the system matrix A from the
 * element/condition equation-id graphs, then allocates A zero-filled.
 * @param pScheme Scheme used to query element/condition equation ids
 * @param A System matrix to be (re)structured
 * @param rModelPart Model part providing the elements and conditions
 */
virtual void ConstructMatrixStructure(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType& A,
    ModelPart& rModelPart)
{
    //filling with zero the matrix (creating the structure)
    Timer::Start("MatrixStructure");
    // Getting the elements from the model
    const int nelements = static_cast<int>(rModelPart.Elements().size());
    // Getting the array of the conditions
    const int nconditions = static_cast<int>(rModelPart.Conditions().size());
    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
    ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
    ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
    const std::size_t equation_size = BaseType::mEquationSystemSize;
    // One lock per matrix row to guard the concurrent insertions below;
    // indices[row] accumulates the set of columns coupled to that row.
    std::vector< LockObject > lock_array(equation_size);
    std::vector<std::unordered_set<std::size_t> > indices(equation_size);
    #pragma omp parallel for firstprivate(equation_size)
    for (int iii = 0; iii < static_cast<int>(equation_size); iii++) {
        indices[iii].reserve(40);  // heuristic per-row capacity to limit rehashing
    }
    Element::EquationIdVectorType ids(3, 0);
    #pragma omp parallel for firstprivate(nelements, ids)
    for (int iii=0; iii<nelements; iii++) {
        typename ElementsContainerType::iterator i_element = el_begin + iii;
        pScheme->EquationId( *(i_element.base()) , ids, CurrentProcessInfo);
        // Every dof of the element couples with every dof of the same element
        for (std::size_t i = 0; i < ids.size(); i++) {
            lock_array[ids[i]].SetLock();
            auto& row_indices = indices[ids[i]];
            row_indices.insert(ids.begin(), ids.end());
            lock_array[ids[i]].UnSetLock();
        }
    }
    #pragma omp parallel for firstprivate(nconditions, ids)
    for (int iii = 0; iii<nconditions; iii++) {
        typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
        pScheme->Condition_EquationId( *(i_condition.base()), ids, CurrentProcessInfo);
        for (std::size_t i = 0; i < ids.size(); i++) {
            lock_array[ids[i]].SetLock();
            auto& row_indices = indices[ids[i]];
            row_indices.insert(ids.begin(), ids.end());
            lock_array[ids[i]].UnSetLock();
        }
    }
    //destroy locks
    lock_array = std::vector< LockObject >();
    //count the row sizes
    unsigned int nnz = 0;
    for (unsigned int i = 0; i < indices.size(); i++) {
        nnz += indices[i].size();
    }
    A = CompressedMatrixType(indices.size(), indices.size(), nnz);
    double* Avalues = A.value_data().begin();
    std::size_t* Arow_indices = A.index1_data().begin();
    std::size_t* Acol_indices = A.index2_data().begin();
    //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
    Arow_indices[0] = 0;
    for (int i = 0; i < static_cast<int>(A.size1()); i++) {
        Arow_indices[i+1] = Arow_indices[i] + indices[i].size();
    }
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(A.size1()); i++) {
        const unsigned int row_begin = Arow_indices[i];
        const unsigned int row_end = Arow_indices[i+1];
        unsigned int k = row_begin;
        // Copy the (unsorted) column set into the CSR arrays, then sort the row
        for (auto it = indices[i].begin(); it != indices[i].end(); it++) {
            Acol_indices[k] = *it;
            Avalues[k] = 0.0;
            k++;
        }
        indices[i].clear(); //deallocating the memory
        std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
    }
    A.set_filled(indices.size()+1, nnz);
    Timer::Stop("MatrixStructure");
}
/**
 * @brief Scatters one local system (LHS matrix + RHS vector) into the global
 * system, using EquationId to map local rows to global rows.
 * @param A Global LHS matrix
 * @param b Global RHS vector
 * @param LHS_Contribution Local stiffness matrix
 * @param RHS_Contribution Local RHS vector
 * @param EquationId Global equation ids of the local dofs
 */
void Assemble(
    TSystemMatrixType& A,
    TSystemVectorType& b,
    const LocalSystemMatrixType& LHS_Contribution,
    const LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
    )
{
    const unsigned int local_size = LHS_Contribution.size1();
    for (unsigned int i_local = 0; i_local < local_size; ++i_local) {
        const unsigned int i_global = EquationId[i_local];
        // Accumulate the RHS entry atomically: several threads may assemble
        // into the same global row concurrently.
        double& r_b_value = b[i_global];
        const double& r_rhs_value = RHS_Contribution(i_local);
        #pragma omp atomic
        r_b_value += r_rhs_value;
        // Scatter the matching local matrix row into A.
        AssembleRowContribution(A, LHS_Contribution, i_global, i_local, EquationId);
    }
}
//**************************************************************************
/**
 * @brief Scatters a local RHS vector into the global RHS, mapping local
 * entries to global positions through EquationId.
 * @param b Global RHS vector
 * @param RHS_Contribution Local RHS vector
 * @param EquationId Global equation ids of the local dofs
 */
void AssembleRHS(
    TSystemVectorType& b,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
    )
{
    const unsigned int local_size = RHS_Contribution.size();
    for (unsigned int i_local = 0; i_local < local_size; ++i_local) {
        const unsigned int i_global = EquationId[i_local];
        // ASSEMBLING THE SYSTEM VECTOR (atomic: rows may be shared by threads)
        double& r_global_value = b[i_global];
        const double& r_local_value = RHS_Contribution[i_local];
        #pragma omp atomic
        r_global_value += r_local_value;
    }
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/// Appends candidate to v only if it is not already present (linear scan).
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    for (const std::size_t entry : v) {
        if (entry == candidate) {
            return; // already stored, nothing to do
        }
    }
    v.push_back(candidate);
}
/**
 * @brief Assembles the global RHS vector b from all active elements and
 * conditions, without applying Dirichlet conditions.
 * @param pScheme Scheme providing the local RHS contributions
 * @param rModelPart The model part of the problem to solve
 * @param b The RHS vector being assembled
 */
void BuildRHSNoDirichlet(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& b)
{
    KRATOS_TRY
    //Getting the Elements
    ElementsArrayType& pElements = rModelPart.Elements();
    //getting the array of the conditions
    ConditionsArrayType& ConditionsArray = rModelPart.Conditions();
    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
    //contributions to the system
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
    //vector containing the localization in the system of the different
    //terms
    Element::EquationIdVectorType EquationId;
    // assemble all elements
    //for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
    const int nelements = static_cast<int>(pElements.size());
    #pragma omp parallel firstprivate(nelements, RHS_Contribution, EquationId)
    {
        // nowait: threads finishing the element loop move straight to conditions
        #pragma omp for schedule(guided, 512) nowait
        for (int i=0; i<nelements; i++) {
            typename ElementsArrayType::iterator it = pElements.begin() + i;
            //detect if the element is active or not. If the user did not make any choice the element
            //is active by default
            bool element_is_active = true;
            if( (it)->IsDefined(ACTIVE) ) {
                element_is_active = (it)->Is(ACTIVE);
            }
            if(element_is_active) {
                //calculate elemental Right Hand Side Contribution
                pScheme->Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo);
                //assemble the elemental contribution (atomic inside AssembleRHS)
                AssembleRHS(b, RHS_Contribution, EquationId);
            }
        }
        LHS_Contribution.resize(0, 0, false);
        RHS_Contribution.resize(0, false);
        // assemble all conditions
        const int nconditions = static_cast<int>(ConditionsArray.size());
        #pragma omp for schedule(guided, 512)
        for (int i = 0; i<nconditions; i++) {
            auto it = ConditionsArray.begin() + i;
            //detect if the element is active or not. If the user did not make any choice the element
            //is active by default
            bool condition_is_active = true;
            if( (it)->IsDefined(ACTIVE) ) {
                condition_is_active = (it)->Is(ACTIVE);
            }
            if(condition_is_active) {
                //calculate elemental contribution
                pScheme->Condition_Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo);
                //assemble the elemental contribution
                AssembleRHS(b, RHS_Contribution, EquationId);
            }
        }
    }
    KRATOS_CATCH("")
}
//******************************************************************************************
//******************************************************************************************
/// Splits the row range [0, number_of_rows) into number_of_threads contiguous
/// chunks; partitions[i]..partitions[i+1] is the slice owned by thread i.
/// The last chunk absorbs any remainder of the integer division.
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    const int chunk_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    for (unsigned int i = 1; i < number_of_threads; i++)
        partitions[i] = partitions[i - 1] + chunk_size;
    partitions[number_of_threads] = number_of_rows;
}
/**
 * @brief Adds row i_local of the local matrix Alocal into row i of the global
 * CSR matrix A, locating each column by scanning from the previously found
 * position (forward or backward depending on the id ordering).
 * @note Assumes every id in EquationId is already present in row i's sparsity
 * pattern — ForwardFind/BackwardFind never check bounds, so a missing entry
 * would scan out of the row. TODO(review): confirm the structure guarantees this.
 * @param A Global matrix (CSR storage)
 * @param Alocal Local contribution matrix
 * @param i Global row index
 * @param i_local Local row index inside Alocal
 * @param EquationId Global column ids of the local columns
 */
inline void AssembleRowContribution(TSystemMatrixType& A, const Matrix& Alocal, const unsigned int i, const unsigned int i_local, Element::EquationIdVectorType& EquationId)
{
    double* values_vector = A.value_data().begin();
    std::size_t* index1_vector = A.index1_data().begin();
    std::size_t* index2_vector = A.index2_data().begin();
    size_t left_limit = index1_vector[i];
    // size_t right_limit = index1_vector[i+1];
    //find the first entry
    size_t last_pos = ForwardFind(EquationId[0],left_limit,index2_vector);
    size_t last_found = EquationId[0];
    // Atomic adds: multiple threads may assemble into the same global entry.
    double& r_a = values_vector[last_pos];
    const double& v_a = Alocal(i_local,0);
    #pragma omp atomic
    r_a += v_a;
    //now find all of the other entries
    size_t pos = 0;
    for (unsigned int j=1; j<EquationId.size(); j++) {
        unsigned int id_to_find = EquationId[j];
        // Start the search at the previous hit: columns close in id are close
        // in the sorted row, so this is usually a short scan.
        if(id_to_find > last_found) {
            pos = ForwardFind(id_to_find,last_pos+1,index2_vector);
        } else if(id_to_find < last_found) {
            pos = BackwardFind(id_to_find,last_pos-1,index2_vector);
        } else {
            pos = last_pos;
        }
        double& r = values_vector[pos];
        const double& v = Alocal(i_local,j);
        #pragma omp atomic
        r += v;
        last_found = id_to_find;
        last_pos = pos;
    }
}
/// Scans index_vector forward from position start and returns the first
/// position holding id_to_find. The caller must guarantee the id exists;
/// there is intentionally no bounds check.
inline unsigned int ForwardFind(const unsigned int id_to_find,
                                const unsigned int start,
                                const size_t* index_vector)
{
    unsigned int position = start;
    while (index_vector[position] != id_to_find) {
        ++position;
    }
    return position;
}
/// Scans index_vector backward from position start and returns the first
/// position holding id_to_find. The caller must guarantee the id exists;
/// there is intentionally no bounds check.
inline unsigned int BackwardFind(const unsigned int id_to_find,
                                 const unsigned int start,
                                 const size_t* index_vector)
{
    unsigned int position = start;
    while (index_vector[position] != id_to_find) {
        --position;
    }
    return position;
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBlockBuilderAndSolver */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
|
Fig_6.4_wrongPrivate.c | // sample compile command: "gcc -fopenmp -c Fig_6.4_wrongPrivate.c" to generate *.o object file
#include <stdio.h>
// Deliberately broken OpenMP example (the filename marks it as such):
// `private(tmp)` gives every thread its OWN uninitialized copy of tmp, so the
// partial sums accumulate into those copies and are thrown away at the end of
// the parallel region. The outer `tmp` is never written and still holds 0.
// The correct data-sharing clause would be `reduction(+:tmp)` — left broken
// on purpose as a teaching example; do not "fix" it.
void wrong()
{
    int tmp = 0;
    #pragma omp parallel for private(tmp)
    for (int j = 0; j < 1000; j++)
        tmp += j; // updates the thread-private copy, not the outer tmp
    printf("%d\n", tmp); //tmp is 0 here
}
|
depth_to_space.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_DEPTH_TO_SPACE_H_
#define MACE_KERNELS_DEPTH_TO_SPACE_H_
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/public/mace.h"
#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif // MACE_ENABLE_OPENCL
namespace mace {
namespace kernels {
// CPU implementation of depth-to-space / space-to-depth rearrangement on
// NCHW tensors.
//   d2s == true : depth shrinks by block_size^2, height/width grow by block_size
//   d2s == false: the inverse (space-to-depth)
template<DeviceType D, typename T>
struct DepthToSpaceOpFunctor {
  explicit DepthToSpaceOpFunctor(const int block_size, bool d2s)
      : block_size_(block_size), d2s_(d2s) {}
  // Resizes `output` to the rearranged shape and copies the elements.
  // Input layout is (batch, depth, height, width) as read from dim(0..3).
  MaceStatus operator()(const Tensor *input,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    const index_t batch_size = input->dim(0);
    const index_t input_depth = input->dim(1);
    const index_t input_height = input->dim(2);
    const index_t input_width = input->dim(3);
    index_t output_depth, output_width, output_height;
    if (d2s_) {
      output_depth = input_depth / (block_size_ * block_size_);
      output_width = input_width * block_size_;
      output_height = input_height * block_size_;
    } else {
      output_depth = input_depth * block_size_ * block_size_;
      output_width = input_width / block_size_;
      output_height = input_height / block_size_;
    }
    std::vector<index_t> output_shape = {batch_size, output_depth,
                                         output_height, output_width};
    MACE_RETURN_IF_ERROR(output->Resize(output_shape));
    Tensor::MappingGuard logits_guard(input);
    Tensor::MappingGuard output_guard(output);
    const T *input_ptr = input->data<T>();
    T *output_ptr = output->mutable_data<T>();
    if (d2s_) {
#pragma omp parallel for
      for (index_t b = 0; b < batch_size; ++b) {
        for (index_t d = 0; d < output_depth; ++d) {
          for (index_t h = 0; h < output_height; ++h) {
            const index_t in_h = h / block_size_;
            const index_t offset_h = (h % block_size_);
            // BUGFIX: the width loop counter was declared `int`, inconsistent
            // with the index_t counters used everywhere else and prone to
            // overflow for large widths.
            for (index_t w = 0; w < output_width; ++w) {
              const index_t in_w = w / block_size_;
              const index_t offset_w = w % block_size_;
              const index_t offset_d =
                  (offset_h * block_size_ + offset_w) * output_depth;
              const index_t in_d = d + offset_d;
              const index_t o_index =
                  ((b * output_depth + d) * output_height + h) * output_width
                      + w;
              const index_t i_index =
                  ((b * input_depth + in_d) * input_height + in_h) * input_width
                      + in_w;
              output_ptr[o_index] = input_ptr[i_index];
            }
          }
        }
      }
    } else {
#pragma omp parallel for
      for (index_t b = 0; b < batch_size; ++b) {
        for (index_t d = 0; d < input_depth; ++d) {
          for (index_t h = 0; h < input_height; ++h) {
            const index_t out_h = h / block_size_;
            const index_t offset_h = (h % block_size_);
            for (index_t w = 0; w < input_width; ++w) {
              const index_t out_w = w / block_size_;
              const index_t offset_w = (w % block_size_);
              const index_t offset_d =
                  (offset_h * block_size_ + offset_w) * input_depth;
              const index_t out_d = d + offset_d;
              const index_t o_index =
                  ((b * output_depth + out_d) * output_height + out_h)
                      * output_width + out_w;
              const index_t i_index =
                  ((b * input_depth + d) * input_height + h) * input_width
                      + w;
              output_ptr[o_index] = input_ptr[i_index];
            }
          }
        }
      }
    }
    return MACE_SUCCESS;
  }
  const int block_size_;  // spatial block size of the rearrangement
  bool d2s_;              // true = depth-to-space, false = space-to-depth
};
#ifdef MACE_ENABLE_OPENCL
// GPU (OpenCL) specialization. operator() is only declared here; its
// definition lives in the corresponding OpenCL implementation file.
template<typename T>
struct DepthToSpaceOpFunctor<DeviceType::GPU, T> {
  DepthToSpaceOpFunctor(const int block_size, bool d2s)
      : block_size_(block_size), d2s_(d2s) {}
  MaceStatus operator()(const Tensor *input,
                        Tensor *output,
                        StatsFuture *future);
  const int block_size_;  // spatial block size of the rearrangement
  bool d2s_;              // true = depth-to-space, false = space-to-depth
  cl::Kernel kernel_;     // OpenCL kernel handle reused across invocations
  uint32_t kwg_size_;     // presumably the kernel workgroup size — TODO confirm in the .cc
  std::unique_ptr<BufferBase> kernel_error_;
  std::vector<index_t> input_shape_;  // presumably cached to detect shape changes — TODO confirm
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_DEPTH_TO_SPACE_H_
|
sum_float.c | //sum.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 120000
// read timer in second
// Returns the current wall-clock time in seconds (millisecond resolution,
// obtained via ftime).
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    double seconds = (double) tm.time;
    double millis = (double) tm.millitm;
    return seconds + millis / 1000.0;
}
//Create a matrix and a vector and fill with random numbers
// Fills the N-element vector X with pseudo-random floats in [0, 10).
void init(float *X) {
    for (int i = 0; i < N; i++) {
        float r = (float) rand();
        X[i] = r / (float) (RAND_MAX / 10.0);
    }
}
//Our sum function- what it does is pretty straight-forward.
// Sums the N elements of X using an OpenMP SIMD reduction
// (vectorized accumulation when compiled with -fopenmp).
float sum(float *X) {
    float result = 0;
    #pragma omp simd reduction(+:result)
    for (int i = 0; i < N; i++)
        result += X[i];
    return result;
}
// Debug functions
// Plain scalar reference sum of the N elements of X, used to cross-check
// the SIMD version.
float sum_serial(float *X) {
    float result = 0;
    for (int i = 0; i < N; i++)
        result += X[i];
    return result;
}
// Prints the first 8 entries of the vector, bracketed, two decimals each.
void print_vector(float *vector) {
    printf("[");
    for (int i = 0; i < 8; i++)
        printf("%.2f ", vector[i]);
    puts("]");
}
// Benchmark driver: times N_RUNS repetitions of the SIMD and serial sums,
// prints both results and their throughput, and cross-checks them.
int main(int argc, char **argv) {
    //Set everything up
    float *X = malloc(sizeof(float)*N);
    if (X == NULL) { // BUGFIX: allocation result was never checked
        fprintf(stderr, "failed to allocate %zu bytes\n", sizeof(float)*N);
        return 1;
    }
    float result, result_serial;
    srand(time(NULL));
    init(X);
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        result = sum(X);
    double t = (read_timer() - start);
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        result_serial = sum_serial(X);
    double t_serial = (read_timer() - start_serial);
    print_vector(X);
    puts("=\n");
    printf("SIMD: %f\n", result);
    puts("---------------------------------");
    printf("Serial: %f\n", result_serial);
    // BUGFIX: a sum of N elements performs ~N additions per run; the previous
    // formula ((2.0*N)*N*N_RUNS) counted 2*N^2 flops per run, which is the
    // matrix-vector count, and grossly inflated the reported GFLOPS.
    double gflops = ((double) N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = ((double) N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Sum (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("Sum (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness check: %f\n", result_serial - result);
    free(X);
    return 0;
}
|
fermi_bool_table.h | #pragma once
#include <vector>
namespace hydra::symmetries {
// Creates the table of Fermi signs for a given particle number
// Creates the table of Fermi signs for a given particle number.
// Entry [sym * raw_size + idx] holds the Fermi sign of applying symmetry
// `sym` to the idx-th state. With OpenMP each thread fills its own chunk of
// states and the chunks are concatenated per symmetry.
template <class States, class GroupAction>
std::vector<bool> fermi_bool_table(States &&states,
                                   GroupAction &&group_action) {
  assert(states.n() == group_action.n_sites());
  int n_sites = group_action.n_sites();
  int n_symmetries = group_action.n_symmetries();
  idx_t raw_size = states.size();
  std::vector<bool> fermi_bool_table(raw_size * n_symmetries);
  // Flat array of all permutations, n_sites entries per symmetry.
  const int *sym_ptr = group_action.permutation_array().data();
#ifndef _OPENMP
  auto fermi_work = symmetries::fermi_work(n_sites);
  for (int sym = 0; sym < n_symmetries; ++sym) {
    idx_t idx = 0;
    for (auto state : states) {
      fermi_bool_table[sym * raw_size + idx] =
          fermi_bool_of_permutation(state, sym_ptr, fermi_work.data());
      ++idx;
    }
    sym_ptr += n_sites;
  }
#else
  for (int sym = 0; sym < n_symmetries; ++sym) {
    std::vector<std::vector<bool>> fermi_bool_table_local;
#pragma omp parallel
    {
      int myid = omp_get_thread_num();
      int rank = omp_get_num_threads();
#pragma omp single
      { fermi_bool_table_local.resize(rank); }
#pragma omp barrier
      auto states_thread = StatesOfThread(states);
      fermi_bool_table_local[myid].resize(states_thread.end() -
                                          states_thread.begin());
      // BUGFIX: qualify the call and use a distinct variable name. The
      // previous `auto fermi_work = fermi_work(n_sites);` referred to the
      // variable inside its own initializer and did not compile.
      auto thread_work = symmetries::fermi_work(n_sites);
      idx_t idx = 0;
      for (auto state : states_thread) {
        bool fermi_bool =
            fermi_bool_of_permutation(state, sym_ptr, thread_work.data());
        fermi_bool_table_local[myid][idx] = fermi_bool;
        ++idx;
      }
    } // pragma omp parallel
    // Concatenate the per-thread chunks into the slice of this symmetry.
    auto fermi_bool_table_for_sym =
        utils::combine_vectors_copy(fermi_bool_table_local);
    std::copy(fermi_bool_table_for_sym.begin(), fermi_bool_table_for_sym.end(),
              fermi_bool_table.begin() + sym * raw_size);
    sym_ptr += n_sites;
  }
#endif // ifndef _OPENMP
  return fermi_bool_table;
}
} // namespace hydra::symmetries
|
gt.stats.c | /*
* PROJECT: GEM-Tools library
* FILE: gt.stats.c
* DATE: 02/08/2012
* AUTHOR(S): Santiago Marco-Sola <santiagomsola@gmail.com>
* DESCRIPTION: Utility to retrieve very naive stats from {MAP,SAM,FASTQ} files
*/
#include <getopt.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#include "gem_tools.h"
#define GT_STATS_OUT_FILE stdout
/* Runtime configuration of the gt.stats tool, filled from the command line. */
typedef struct {
  /* [Input] */
  char *name_input_file;      // input {MAP,SAM,FASTQ} file path
  char *name_reference_file;  // reference sequence file path
  char *name_output_file;     // output file path
  FILE* output_file;          // resolved text-report stream
  FILE* output_file_json;     // resolved JSON-report stream
  bool mmap_input;            // memory-map the input instead of streaming
  bool paired_end;            // treat reads as paired-end templates
  uint64_t num_reads;         // limit on the number of reads to process
  /* [Tests] — which stat groups to compute */
  bool first_map;
  bool maps_profile;
  bool mismatch_transitions;
  bool mismatch_quality;
  bool splitmaps_profile;
  bool indel_profile;
  bool population_profile;
  /* [MAP Specific] */
  bool use_only_decoded_maps;
  /* [Output] */
  bool verbose;
  bool compact; // FIXME Deleteme
  bool print_json;
  bool print_both;
  /* [Misc] */
  uint64_t num_threads;       // worker thread count
} gt_stats_args;
/* Global defaults for all options (C99 designated initializers — the order
 * here intentionally does not need to match the struct declaration). */
gt_stats_args parameters = {
    /* [Input] */
    .name_input_file=NULL,
    .name_reference_file=NULL,
    .name_output_file=NULL,
    .mmap_input=false,
    .paired_end=false,
    .num_reads=0,
    .output_file=NULL,
    .output_file_json=NULL,
    /* [Tests] — all optional profiles disabled by default */
    .first_map=false,
    .maps_profile = false,
    .mismatch_transitions = false,
    .mismatch_quality = false,
    .splitmaps_profile = false,
    .indel_profile = false,
    .population_profile = false,
    /* [MAP Specific] */
    .use_only_decoded_maps = false,
    /* [Output] */
    .verbose=false,
    .compact = false,
    /* [Misc] */
    .num_threads=1,
    .print_json=false,
    .print_both=false,
};
/*
* STATS Print results
*/
/*
 * Builds the "general stats" JSON object: read/template counts, length and
 * quality distributions, and base composition.
 * NOTE: the misspelled "read_lenght_*" keys are kept on purpose — downstream
 * consumers may depend on the existing key names.
 */
JsonNode* gt_stats_print_json_general_stats(gt_stats* const stats,uint64_t num_reads,const bool paired_end){
  /**
   * General stats
   */
  const uint64_t num_templates = paired_end ? num_reads/2 : num_reads;
  // For the case of zero input lines
  if(stats->min_length>stats->max_length) stats->min_length=0;
  if(stats->mapped_min_length>stats->mapped_max_length) stats->mapped_min_length=0;
  JsonNode* general_stats = json_mkobject();
  json_append_member(general_stats, "num_reads", json_mknumber(num_reads));
  json_append_member(general_stats, "num_templates", json_mknumber(num_templates));
  json_append_member(general_stats, "read_lenght_min", json_mknumber(stats->min_length));
  json_append_member(general_stats, "read_lenght_avg", json_mknumber(GT_DIV(stats->total_bases,stats->num_blocks)));
  json_append_member(general_stats, "read_lenght_max", json_mknumber(stats->max_length));
  json_append_member(general_stats, "templates_mapped", json_mknumber(stats->num_mapped));
  json_append_member(general_stats, "reads_mapped", json_mknumber(stats->num_mapped_reads));
  json_append_member(general_stats, "reads_mapped_length_min", json_mknumber(stats->mapped_min_length));
  json_append_member(general_stats, "reads_mapped_length_avg", json_mknumber(GT_DIV(stats->total_bases_aligned,(paired_end)?stats->num_mapped*2:stats->num_mapped)));
  json_append_member(general_stats, "reads_mapped_length_max", json_mknumber(stats->mapped_max_length));
  json_append_member(general_stats, "num_bases", json_mknumber(stats->total_bases));
  json_append_member(general_stats, "num_bases_aligned", json_mknumber(stats->total_bases_aligned));
  json_append_member(general_stats, "bases_prop", gt_json_int_named_tuple(5,
      "A", stats->nt_counting[0],
      "C", stats->nt_counting[1],
      "G", stats->nt_counting[2],
      "T", stats->nt_counting[3],
      "N", stats->nt_counting[4]
      ));
  json_append_member(general_stats, "num_alignments", json_mknumber(stats->num_alignments));
  json_append_member(general_stats, "read_length_ranges", gt_json_int_named_tuple(
      GT_STATS_LENGTH_RANGE,
      "[0,5]", stats->length[GT_STATS_LENGTH_RANGE_5],
      "(5,40]", stats->length[GT_STATS_LENGTH_RANGE_40],
      "(40,80]", stats->length[GT_STATS_LENGTH_RANGE_80],
      "(80,100]", stats->length[GT_STATS_LENGTH_RANGE_100],
      "(100,150]", stats->length[GT_STATS_LENGTH_RANGE_150],
      "(150,300]", stats->length[GT_STATS_LENGTH_RANGE_300],
      "(300,800]", stats->length[GT_STATS_LENGTH_RANGE_800],
      "(800,1000]", stats->length[GT_STATS_LENGTH_RANGE_1000],
      "(1000,2000]", stats->length[GT_STATS_LENGTH_RANGE_2000],
      "(2000,5000]", stats->length[GT_STATS_LENGTH_RANGE_5000],
      "(5000,inf)", stats->length[GT_STATS_LENGTH_RANGE_BEHOND]
      ));
  json_append_member(general_stats, "read_length_ranges_mapped", gt_json_int_named_tuple(
      GT_STATS_LENGTH_RANGE,
      "[0,5]", stats->length_mapped[GT_STATS_LENGTH_RANGE_5],
      "(5,40]", stats->length_mapped[GT_STATS_LENGTH_RANGE_40],
      "(40,80]", stats->length_mapped[GT_STATS_LENGTH_RANGE_80],
      "(80,100]", stats->length_mapped[GT_STATS_LENGTH_RANGE_100],
      "(100,150]", stats->length_mapped[GT_STATS_LENGTH_RANGE_150],
      "(150,300]", stats->length_mapped[GT_STATS_LENGTH_RANGE_300],
      "(300,800]", stats->length_mapped[GT_STATS_LENGTH_RANGE_800],
      "(800,1000]", stats->length_mapped[GT_STATS_LENGTH_RANGE_1000],
      "(1000,2000]", stats->length_mapped[GT_STATS_LENGTH_RANGE_2000],
      "(2000,5000]", stats->length_mapped[GT_STATS_LENGTH_RANGE_5000],
      "(5000,inf)", stats->length_mapped[GT_STATS_LENGTH_RANGE_BEHOND]
      ));
  json_append_member(general_stats, "read_qualities_avg", gt_json_int_array(32, 192, stats->avg_quality));
  JsonNode* read_length_quals = json_mkobject();
  // BUGFIX: the "(150,300]" bucket was missing and every subsequent label was
  // paired with the previous bucket's offset constant. Each label now uses
  // its own GT_STATS_LENGTH_RANGE_* constant, matching the tables above.
  json_append_member(read_length_quals, "[0,5]", gt_json_int_array(GT_STATS_LENGTH_RANGE_5*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->length__quality));
  json_append_member(read_length_quals, "(5,40]", gt_json_int_array(GT_STATS_LENGTH_RANGE_40*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->length__quality));
  json_append_member(read_length_quals, "(40,80]", gt_json_int_array(GT_STATS_LENGTH_RANGE_80*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->length__quality));
  json_append_member(read_length_quals, "(80,100]", gt_json_int_array(GT_STATS_LENGTH_RANGE_100*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->length__quality));
  json_append_member(read_length_quals, "(100,150]", gt_json_int_array(GT_STATS_LENGTH_RANGE_150*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->length__quality));
  json_append_member(read_length_quals, "(150,300]", gt_json_int_array(GT_STATS_LENGTH_RANGE_300*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->length__quality));
  json_append_member(read_length_quals, "(300,800]", gt_json_int_array(GT_STATS_LENGTH_RANGE_800*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->length__quality));
  json_append_member(read_length_quals, "(800,1000]", gt_json_int_array(GT_STATS_LENGTH_RANGE_1000*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->length__quality));
  json_append_member(read_length_quals, "(1000,2000]", gt_json_int_array(GT_STATS_LENGTH_RANGE_2000*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->length__quality));
  json_append_member(read_length_quals, "(2000,5000]", gt_json_int_array(GT_STATS_LENGTH_RANGE_5000*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->length__quality));
  json_append_member(read_length_quals, "(5000,inf)", gt_json_int_array(GT_STATS_LENGTH_RANGE_BEHOND*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->length__quality));
  json_append_member(general_stats, "read_qualities_per_length", read_length_quals);
  return general_stats;
}
/*
 * Wraps a GT_STATS_MISMS_RANGE-bucketed counter array into a labelled JSON
 * tuple (mismatch/error count distribution).
 * NOTE(review): the last label "(50,100]" is paired with the _BEHOND bucket;
 * it likely should read "(50,inf)" — confirm against the range constants
 * before changing the emitted key.
 */
JsonNode* gt_stats_create_error_distribution(uint64_t* data){
  return gt_json_int_named_tuple(
      GT_STATS_MISMS_RANGE,
      "[0]", data[GT_STATS_MISMS_RANGE_0],
      "[1]", data[GT_STATS_MISMS_RANGE_1],
      "[2]", data[GT_STATS_MISMS_RANGE_2],
      "[3]", data[GT_STATS_MISMS_RANGE_3],
      "[4]", data[GT_STATS_MISMS_RANGE_4],
      "[5]", data[GT_STATS_MISMS_RANGE_5],
      "[6]", data[GT_STATS_MISMS_RANGE_6],
      "[7]", data[GT_STATS_MISMS_RANGE_7],
      "[8]", data[GT_STATS_MISMS_RANGE_8],
      "[9]", data[GT_STATS_MISMS_RANGE_9],
      "[10]", data[GT_STATS_MISMS_RANGE_10],
      "(10,20]", data[GT_STATS_MISMS_RANGE_20],
      "(20,50]", data[GT_STATS_MISMS_RANGE_50],
      "(50,100]", data[GT_STATS_MISMS_RANGE_BEHOND]
  );
}
// Flattened index into the 1-nucleotide-context mismatch-transition table:
// four nested dimensions (a, b, c, i), each of size GT_STATS_MISMS_BASE_RANGE,
// where a/b/c are the context bases (see the name built from bases[a],
// bases[b], bases[c] at the call site) and i is the innermost base index.
// FIX: every macro parameter is now parenthesized so that expression
// arguments (e.g. x+1) expand with the intended precedence.
#define GT_STATS_GET_IXD_TRANSITION_1_CTX(a,b,c,i) (((((a)*GT_STATS_MISMS_BASE_RANGE+(b))*GT_STATS_MISMS_BASE_RANGE)+(c))*GT_STATS_MISMS_BASE_RANGE+(i))
/*
 * Serializes stats->maps_profile into a JSON object: base counters,
 * multi-map and uniqueness histograms, strand combinations, the error
 * profile (mismatches/indels/levenshtein), quality profiles, and the
 * mismatch-transition tables. 'paired_end' selects the SE/PE strand
 * section; 'num_reads' is not used in this function.
 * Returns a newly created JsonNode owned by the caller.
 */
JsonNode* gt_stats_print_json_maps_profile(gt_stats* const stats,const uint64_t num_reads,const bool paired_end) {
const gt_maps_profile* const maps_profile = stats->maps_profile;
JsonNode* profile = json_mkobject();
// Global base counters
json_append_member(profile, "num_bases", json_mknumber(maps_profile->total_bases));
json_append_member(profile, "num_matching_bases", json_mknumber(maps_profile->total_bases_matching));
json_append_member(profile, "num_trimmed_bases", json_mknumber(maps_profile->total_bases_trimmed));
// Histogram of multi-map counts, bucketed by GT_STATS_MMAP_RANGE_*
json_append_member(profile, "multi_map_ranges", gt_json_int_named_tuple(
GT_STATS_MMAP_RANGE,
"[0]", stats->mmap[GT_STATS_MMAP_RANGE_0],
"[1]", stats->mmap[GT_STATS_MMAP_RANGE_1],
"(1,5]", stats->mmap[GT_STATS_MMAP_RANGE_5],
"(5,10]", stats->mmap[GT_STATS_MMAP_RANGE_10],
"(10,50]", stats->mmap[GT_STATS_MMAP_RANGE_50],
"(50,100]", stats->mmap[GT_STATS_MMAP_RANGE_100],
"(100,500]", stats->mmap[GT_STATS_MMAP_RANGE_500],
"(500,1000]", stats->mmap[GT_STATS_MMAP_RANGE_1000],
"(1000, inf)", stats->mmap[GT_STATS_MMAP_RANGE_BEHOND]
));
// stats->length__mmap is a flattened 2-D table; each multi-map bucket owns a
// slice of GT_STATS_MMAP_RANGE entries starting at bucket*GT_STATS_MMAP_RANGE
JsonNode* mmap_read_length = json_mkobject();
json_append_member(mmap_read_length, "[0]", gt_json_int_array(GT_STATS_MMAP_RANGE_0*GT_STATS_MMAP_RANGE, GT_STATS_MMAP_RANGE, stats->length__mmap));
json_append_member(mmap_read_length, "[1]", gt_json_int_array(GT_STATS_MMAP_RANGE_1*GT_STATS_MMAP_RANGE, GT_STATS_MMAP_RANGE, stats->length__mmap));
json_append_member(mmap_read_length, "(1,5]", gt_json_int_array(GT_STATS_MMAP_RANGE_5*GT_STATS_MMAP_RANGE, GT_STATS_MMAP_RANGE, stats->length__mmap));
json_append_member(mmap_read_length, "(5,10]", gt_json_int_array(GT_STATS_MMAP_RANGE_10*GT_STATS_MMAP_RANGE, GT_STATS_MMAP_RANGE, stats->length__mmap));
json_append_member(mmap_read_length, "(10,50]", gt_json_int_array(GT_STATS_MMAP_RANGE_50*GT_STATS_MMAP_RANGE, GT_STATS_MMAP_RANGE, stats->length__mmap));
json_append_member(mmap_read_length, "(50,100]", gt_json_int_array(GT_STATS_MMAP_RANGE_100*GT_STATS_MMAP_RANGE, GT_STATS_MMAP_RANGE, stats->length__mmap));
json_append_member(mmap_read_length, "(100,500]", gt_json_int_array(GT_STATS_MMAP_RANGE_500*GT_STATS_MMAP_RANGE, GT_STATS_MMAP_RANGE, stats->length__mmap));
json_append_member(mmap_read_length, "(500,1000]", gt_json_int_array(GT_STATS_MMAP_RANGE_1000*GT_STATS_MMAP_RANGE, GT_STATS_MMAP_RANGE, stats->length__mmap));
json_append_member(mmap_read_length, "(1000,inf)", gt_json_int_array(GT_STATS_MMAP_RANGE_BEHOND*GT_STATS_MMAP_RANGE, GT_STATS_MMAP_RANGE, stats->length__mmap));
json_append_member(profile, "read_length_multi_map_ranges", mmap_read_length);
// Same layout for quality: one GT_STATS_QUAL_SCORE_RANGE slice per bucket
JsonNode* mmap_read_quality = json_mkobject();
json_append_member(mmap_read_quality, "[0]", gt_json_int_array(GT_STATS_MMAP_RANGE_0*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->mmap__avg_quality));
json_append_member(mmap_read_quality, "[1]", gt_json_int_array(GT_STATS_MMAP_RANGE_1*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->mmap__avg_quality));
json_append_member(mmap_read_quality, "(1,5]", gt_json_int_array(GT_STATS_MMAP_RANGE_5*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->mmap__avg_quality));
json_append_member(mmap_read_quality, "(5,10]", gt_json_int_array(GT_STATS_MMAP_RANGE_10*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->mmap__avg_quality));
json_append_member(mmap_read_quality, "(10,50]", gt_json_int_array(GT_STATS_MMAP_RANGE_50*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->mmap__avg_quality));
json_append_member(mmap_read_quality, "(50,100]", gt_json_int_array(GT_STATS_MMAP_RANGE_100*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->mmap__avg_quality));
json_append_member(mmap_read_quality, "(100,500]", gt_json_int_array(GT_STATS_MMAP_RANGE_500*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->mmap__avg_quality));
json_append_member(mmap_read_quality, "(500,1000]", gt_json_int_array(GT_STATS_MMAP_RANGE_1000*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->mmap__avg_quality));
json_append_member(mmap_read_quality, "(1000,inf)", gt_json_int_array(GT_STATS_MMAP_RANGE_BEHOND*GT_STATS_QUAL_SCORE_RANGE, GT_STATS_QUAL_SCORE_RANGE, stats->mmap__avg_quality));
json_append_member(profile, "read_quality_multi_map_ranges", mmap_read_quality);
// Uniqueness-level histogram ([X] = special bucket, then exact levels/ranges)
json_append_member(profile, "unique_ranges", gt_json_int_named_tuple(
GT_STATS_UNIQ_RANGE,
"[X]", stats->uniq[GT_STATS_UNIQ_RANGE_X],
"[0]", stats->uniq[GT_STATS_UNIQ_RANGE_0],
"[1]", stats->uniq[GT_STATS_UNIQ_RANGE_1],
"[2]", stats->uniq[GT_STATS_UNIQ_RANGE_2],
"[3]", stats->uniq[GT_STATS_UNIQ_RANGE_3],
"(3,10]", stats->uniq[GT_STATS_UNIQ_RANGE_10],
"(10,50]", stats->uniq[GT_STATS_UNIQ_RANGE_50],
"(50,100]", stats->uniq[GT_STATS_UNIQ_RANGE_100],
"(100, 500]", stats->uniq[GT_STATS_UNIQ_RANGE_500],
"(500, inf)", stats->uniq[GT_STATS_UNIQ_RANGE_BEHOND]
));
// Strand combinations: 4 pairings for PE, 2 orientations for SE
if(paired_end){
json_append_member(profile, "strands", gt_json_int_named_tuple(
4,
"F+R", maps_profile->pair_strand_fr,
"R+F", maps_profile->pair_strand_rf,
"F+F", maps_profile->pair_strand_ff,
"R+R", maps_profile->pair_strand_rr
));
}else{
json_append_member(profile, "strands", gt_json_int_named_tuple(
2,
"F", maps_profile->single_strand_f,
"R", maps_profile->single_strand_r
));
}
// Error profile: totals plus per-event-type distributions and positions
JsonNode* error_profile = json_mkobject();
json_append_member(error_profile, "total_mismatches", json_mknumber(maps_profile->total_mismatches));
json_append_member(error_profile, "total_errors", json_mknumber(maps_profile->total_errors_events));
json_append_member(error_profile, "total_indel_length", json_mknumber(maps_profile->total_indel_length));
json_append_member(error_profile, "total_levenshtein", json_mknumber(maps_profile->total_levenshtein));
json_append_member(error_profile, "mismatch_distribution", gt_stats_create_error_distribution(maps_profile->mismatches));
json_append_member(error_profile, "levenshtein_distribution", gt_stats_create_error_distribution(maps_profile->levenshtein));
json_append_member(error_profile, "error_distribution", gt_stats_create_error_distribution(maps_profile->errors_events));
json_append_member(error_profile, "inserts_distribution", gt_stats_create_error_distribution(maps_profile->insertion_length));
json_append_member(error_profile, "deletions_distribution", gt_stats_create_error_distribution(maps_profile->deletion_length));
// Positions clipped to the observed max read length
json_append_member(error_profile, "error_positions", gt_json_int_array(
0,
GT_MIN(stats->max_length,GT_STATS_LARGE_READ_POS_RANGE),
maps_profile->error_position
));
json_append_member(profile, "error_profile", error_profile);
// Quality profile: 192 entries starting at offset 32 — presumably the
// printable quality-score window; confirm against the stats collector
JsonNode* quality_profile = json_mkobject();
json_append_member(quality_profile, "mismatch_qualities_avg", gt_json_int_array(32, 192, maps_profile->qual_score_misms));
json_append_member(quality_profile, "error_qualities_avg", gt_json_int_array(32, 192, maps_profile->qual_score_errors));
json_append_member(profile, "quality_profile", quality_profile);
// Mismatch transition tables: flat base-to-base matrix ...
JsonNode* transition_profile = json_mkobject();
json_append_member(transition_profile, "transitions", gt_json_int_array(0,GT_STATS_MISMS_BASE_RANGE*GT_STATS_MISMS_BASE_RANGE, maps_profile->misms_transition));
// ... plus the 1-nucleotide-context table, emitted as two parallel arrays:
// the 3-base context names and the counts for each context
JsonNode* transition_context_order = json_mkarray();
JsonNode* transition_context = json_mkarray();
// transition 1context
char bases[] = {'A','C','G','T','N'};
// 4 bytes: 3 context bases + NUL terminator (bases[4]=='N' is never used
// here since the loops only cover indices 0..3)
char* name = gt_malloc_(4, sizeof(char), false, false);
uint64_t a,b,c;
// Note: the middle base 'b' of the printed name is the OUTERMOST loop;
// the name is built as bases[a] bases[b] bases[c]
for (b=0;b<4;++b) {
for (a=0;a<4;++a) {
for (c=0;c<4;++c) {
sprintf(name, "%c%c%c", bases[a],bases[b],bases[c]);
json_append_element(transition_context_order, json_mkstring(name));
uint64_t i;
for (i=0;i<GT_STATS_MISMS_BASE_RANGE;++i) {
json_append_element(transition_context, json_mknumber(maps_profile->misms_1context[GT_STATS_GET_IXD_TRANSITION_1_CTX(a,b,c,i)]));
}
}
}
}
gt_free(name);
json_append_member(transition_profile, "transition_context", transition_context);
json_append_member(transition_profile, "transition_context_order", transition_context_order);
json_append_member(profile, "transition_profile", transition_profile);
return profile;
}
/*
 * Serializes stats->splitmaps_profile into a JSON object: split-map
 * totals, junction-count and junction-length histograms, junction
 * positions, and (for PE input) split/regular pairing counters.
 * 'num_reads' is not used in this function.
 * Returns a newly created JsonNode owned by the caller.
 */
JsonNode* gt_stats_print_json_splits_profile(gt_stats* const stats,const uint64_t num_reads,const bool paired_end) {
gt_splitmaps_profile* const splitmap_stats = stats->splitmaps_profile;
JsonNode* profile = json_mkobject();
// Split-map totals
json_append_member(profile, "total_reads_with_splitmap", json_mknumber(splitmap_stats->total_splitmaps));
json_append_member(profile, "total_junctions", json_mknumber(splitmap_stats->total_junctions));
json_append_member(profile, "mapped_with_sm", json_mknumber(splitmap_stats->num_mapped_with_splitmaps));
json_append_member(profile, "mapped_only_with_sm", json_mknumber(splitmap_stats->num_mapped_only_splitmaps));
// Histogram: junctions per split-map
json_append_member(profile, "num_junctions", gt_json_int_named_tuple(
GT_STATS_NUM_JUNCTION_RANGE,
"[1]", splitmap_stats->num_junctions[GT_STATS_NUM_JUNCTION_1],
"[2]", splitmap_stats->num_junctions[GT_STATS_NUM_JUNCTION_2],
"[3]", splitmap_stats->num_junctions[GT_STATS_NUM_JUNCTION_3],
"(3,inf]", splitmap_stats->num_junctions[GT_STATS_NUM_JUNCTION_BEHOND]
));
// Histogram: genomic length spanned by each junction
json_append_member(profile, "junction_length", gt_json_int_named_tuple(
GT_STATS_LEN_JUNCTION_RANGE,
"[0,100]", splitmap_stats->length_junctions[GT_STATS_LEN_JUNCTION_100],
"(100,1000]", splitmap_stats->length_junctions[GT_STATS_LEN_JUNCTION_1000],
"(1000,5000]", splitmap_stats->length_junctions[GT_STATS_LEN_JUNCTION_5000],
"(5000,10000]", splitmap_stats->length_junctions[GT_STATS_LEN_JUNCTION_10000],
"(10000,50000]", splitmap_stats->length_junctions[GT_STATS_LEN_JUNCTION_50000],
"(50000,inf)", splitmap_stats->length_junctions[GT_STATS_LEN_JUNCTION_BEHOND]
));
// Junction positions along the read, clipped to the observed max length
json_append_member(profile, "junctions_positions", gt_json_int_array(
0,
GT_MIN(stats->max_length,GT_STATS_SHORT_READ_POS_RANGE),
splitmap_stats->junction_position
));
// PE-only pairing counters (sm=split-mapped end, rm=regular-mapped end)
if (paired_end) {
json_append_member(profile, "splits_sm_sm", json_mknumber(splitmap_stats->pe_sm_sm));
json_append_member(profile, "splits_sm_rm", json_mknumber(splitmap_stats->pe_sm_rm));
json_append_member(profile, "splits_rm_rm", json_mknumber(splitmap_stats->pe_rm_rm));
}
return profile;
}
/*
 * Emits the full statistics report as one JSON document on
 * parameters.output_file_json, with sections "general", "maps_profile"
 * and "splits_profile"; the JSON tree is released afterwards.
 */
void gt_stats_print_json_stats(gt_stats* const stats,uint64_t num_reads,const bool paired_end) {
JsonNode* const json_root = json_mkobject();
json_append_member(json_root, "general", gt_stats_print_json_general_stats(stats, num_reads, paired_end));
json_append_member(json_root, "maps_profile", gt_stats_print_json_maps_profile(stats, num_reads, paired_end));
json_append_member(json_root, "splits_profile", gt_stats_print_json_splits_profile(stats, num_reads, paired_end));
// NOTE(review): json_stringify presumably returns a heap-allocated string
// that is never freed here — confirm against the JSON library's contract
fprintf(parameters.output_file_json, "%s\n", json_stringify(json_root, " "));
json_delete(json_root);
}
/*
 * Prints the human-readable statistics report on parameters.output_file.
 * The [GENERAL.STATS] section is always printed; the remaining sections
 * ([MAPS.PROFILE], [MISMATCH.QUALITY]/[ERRORS.QUALITY],
 * [MISMATCH.TRANSITIONS], [SPLITMAPS.PROFILE], [POPULATION.PROFILE])
 * are emitted only when the corresponding flag in 'parameters' is set
 * and, where relevant, when there is data to show.
 */
void gt_stats_print_stats(gt_stats* const stats,uint64_t num_reads,const bool paired_end) {
FILE* const out = parameters.output_file;
// General stats (reads, alignments, ...) — always printed
fprintf(out,"[GENERAL.STATS]\n");
gt_stats_print_general_stats(out,stats,num_reads,paired_end);
// Maps profile (optional)
if (parameters.maps_profile) {
fprintf(out,"[MAPS.PROFILE]\n");
gt_stats_print_maps_stats(out,stats,num_reads,paired_end);
}
// Quality scores vs errors/mismatches (optional; needs observed events)
if (parameters.mismatch_quality && num_reads>0) {
const gt_maps_profile* const mp = stats->maps_profile;
if (mp->total_mismatches > 0) {
fprintf(out,"[MISMATCH.QUALITY]\n");
gt_stats_print_qualities_error_distribution(
out,mp->qual_score_misms,mp->total_mismatches);
}
if (mp->total_errors_events > 0) {
fprintf(out,"[ERRORS.QUALITY]\n");
gt_stats_print_qualities_error_distribution(
out,mp->qual_score_errors,mp->total_errors_events);
}
}
// Mismatch transition tables (optional; needs observed mismatches)
if (parameters.mismatch_transitions && num_reads>0) {
const gt_maps_profile* const mp = stats->maps_profile;
if (mp->total_mismatches > 0) {
fprintf(out,"[MISMATCH.TRANSITIONS]\n");
fprintf(out,"MismsTransitions\n");
gt_stats_print_misms_transition_table(
out,mp->misms_transition,mp->total_mismatches);
fprintf(out,"MismsTransitions.1-Nucleotide.Context\n");
gt_stats_print_misms_transition_table_1context(
out,mp->misms_1context,mp->total_mismatches);
}
}
// Split-maps profile (optional)
if (parameters.splitmaps_profile) {
fprintf(out,"[SPLITMAPS.PROFILE]\n");
gt_stats_print_split_maps_stats(out,stats,parameters.paired_end);
}
// Population profile (optional)
if (parameters.population_profile) {
fprintf(out,"[POPULATION.PROFILE]\n");
gt_stats_print_population_stats(out,stats,num_reads,parameters.paired_end);
}
}
/*
 * Prints a single CSV line of headline statistics on parameters.output_file:
 * #mapped, %mapped, #unmapped, %unmapped, maps-per-mapped-template,
 * %bases aligned, %bases trimmed, #unique, %unique.
 * Percentages fall back to 0.0 when the denominator is zero.
 */
void gt_stats_print_stats_compact(gt_stats* const stats,uint64_t num_reads,const bool paired_end) {
FILE* const out = parameters.output_file;
// PE input: two reads form one template; SE input: one read per template
const uint64_t num_templates = paired_end ? num_reads>>1 : num_reads;
// #mapped, %mapped
fprintf(out,"%" PRIu64 ",",stats->num_mapped);
fprintf(out,"%2.3f,",num_templates?100.0*(float)stats->num_mapped/(float)num_templates:0.0);
// #unmapped, %unmapped
const uint64_t unmapped = num_templates-stats->num_mapped;
fprintf(out,"%" PRIu64 ",",unmapped);
fprintf(out,"%2.3f,",num_templates?100.0*(float)unmapped/(float)num_templates:0.0);
// Average number of maps per mapped template
fprintf(out,"%2.3f,",stats->num_mapped?(float)stats->num_maps/(float)stats->num_mapped:0.0);
// Percentage of aligned and trimmed bases
fprintf(out,"%2.3f,",GT_GET_PERCENTAGE(stats->maps_profile->total_bases_matching,stats->maps_profile->total_bases));
fprintf(out,"%2.3f,",GT_GET_PERCENTAGE(stats->maps_profile->total_bases_trimmed,stats->maps_profile->total_bases));
// #unique, %unique: sum every uniqueness bucket except the special [X] one
const uint64_t all_uniq =
stats->uniq[GT_STATS_UNIQ_RANGE_0] + stats->uniq[GT_STATS_UNIQ_RANGE_1] +
stats->uniq[GT_STATS_UNIQ_RANGE_2] + stats->uniq[GT_STATS_UNIQ_RANGE_3] +
stats->uniq[GT_STATS_UNIQ_RANGE_10] + stats->uniq[GT_STATS_UNIQ_RANGE_50] +
stats->uniq[GT_STATS_UNIQ_RANGE_100] + stats->uniq[GT_STATS_UNIQ_RANGE_500] +
stats->uniq[GT_STATS_UNIQ_RANGE_BEHOND];
fprintf(out,"%" PRIu64 ",",all_uniq);
fprintf(out,"%2.3f\n",num_templates?100.0*(float)all_uniq/(float)num_templates:0.0);
}
/*
* CORE functions
*/
/*
 * Driver: reads the input file (or stdin) with one buffered reader per
 * thread, accumulates per-thread gt_stats, merges them, and prints the
 * report/JSON/compact output selected by the global 'parameters'.
 * When the indel profile is requested, a reference sequence archive is
 * loaded first (fatal error on parse failure).
 */
void gt_stats_parallel_generate_stats() {
// Stats info
gt_stats_analysis stats_analysis = GT_STATS_ANALYSIS_DEFAULT();
// One gt_stats slot per thread; each thread fills its own slot below
gt_stats** stats = gt_calloc(parameters.num_threads,gt_stats*,false);
// Select analysis: enable only the profiles the user asked for
stats_analysis.first_map = parameters.first_map;
stats_analysis.maps_profile = parameters.maps_profile|parameters.mismatch_quality|parameters.mismatch_transitions;
stats_analysis.nucleotide_stats = true;
stats_analysis.splitmap_profile = parameters.splitmaps_profile;
stats_analysis.indel_profile = parameters.indel_profile;
stats_analysis.population_profile = parameters.population_profile;
stats_analysis.use_map_counters = !parameters.use_only_decoded_maps;
// Open file (stdin when no input file name was given)
gt_input_file* input_file = (parameters.name_input_file==NULL) ?
gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input);
gt_sequence_archive* sequence_archive = NULL;
if (stats_analysis.indel_profile) {
// Indel profiling needs the reference: load it into a sequence archive
sequence_archive = gt_sequence_archive_new(GT_CDNA_ARCHIVE);
gt_input_file* const reference_file = gt_input_file_open(parameters.name_reference_file,false);
if (gt_input_multifasta_parser_get_archive(reference_file,sequence_archive)!=GT_IFP_OK) {
fprintf(stderr,"\n");
gt_fatal_error_msg("Error parsing reference file '%s'\n",parameters.name_reference_file);
}
gt_input_file_close(reference_file);
}
// Parallel reading+process (single-threaded with tid==0 without OpenMP)
#ifdef HAVE_OPENMP
#pragma omp parallel num_threads(parameters.num_threads)
#endif
{
#ifdef HAVE_OPENMP
uint64_t tid = omp_get_thread_num();
#else
uint64_t tid = 0;
#endif
gt_buffered_input_file* buffered_input = gt_buffered_input_file_new(input_file);
gt_status error_code;
gt_template *template = gt_template_new();
stats[tid] = gt_stats_new();
gt_generic_parser_attributes* generic_parser_attribute = gt_input_generic_parser_attributes_new(parameters.paired_end);
// Loop until the parser returns 0 (no more templates)
while ((error_code=gt_input_generic_parser_get_template(buffered_input,template,generic_parser_attribute))) {
// Parse errors are reported but processing continues with the next record
if (error_code!=GT_IMP_OK) {
gt_error_msg("Fatal error parsing file '%s'\n",parameters.name_input_file);
}
// Extract stats
gt_stats_calculate_template_stats(stats[tid],template,sequence_archive,&stats_analysis);
}
// Clean
gt_template_delete(template);
gt_buffered_input_file_close(buffered_input);
}
// Merge stats: all per-thread results are folded into stats[0]
gt_stats_merge(stats,parameters.num_threads);
/*
 * Print Statistics
 * Use stats[0]->num_blocks as the number of blocks in a MAP/SAM/FASTA/FASTQ file
 * is the number of reads in a FASTA/FASTQ
 * (an explicit --num-reads overrides the counted value)
 */
if(parameters.print_json){
gt_stats_print_json_stats(stats[0],(parameters.num_reads>0)?
parameters.num_reads:stats[0]->num_blocks,parameters.paired_end);
}
if(!parameters.print_json || parameters.print_both){
if (!parameters.compact) {
gt_stats_print_stats(stats[0],(parameters.num_reads>0)?
parameters.num_reads:stats[0]->num_blocks,parameters.paired_end);
} else {
gt_stats_print_stats_compact(stats[0],(parameters.num_reads>0)?
parameters.num_reads:stats[0]->num_blocks,parameters.paired_end);
}
}
// Clean — only stats[0] is deleted here; presumably gt_stats_merge
// releases stats[1..num_threads-1] — confirm, otherwise they leak
gt_stats_delete(stats[0]); gt_free(stats);
gt_input_file_close(input_file);
}
/*
 * Parses the command line into the global 'parameters' via getopt_long.
 * Exits the process for -h/-H/-J (help/JSON menu) and aborts with a fatal
 * error for unknown options, unknown -f formats, or not-yet-implemented
 * features (mmap-input, reference/index loading, indel profile).
 * NOTE(review): gt_stats_getopt (built by gt_options_adaptor_getopt) is
 * never freed here, only the short-options string is — confirm whether
 * the adaptor allocates it on the heap.
 */
void parse_arguments(int argc,char** argv) {
struct option* gt_stats_getopt = gt_options_adaptor_getopt(gt_stats_options);
gt_string* const gt_stats_short_getopt = gt_options_adaptor_getopt_short(gt_stats_options);
int option, option_index;
while (true) {
// Get option & Select case
if ((option=getopt_long(argc,argv,
gt_string_get_string(gt_stats_short_getopt),gt_stats_getopt,&option_index))==-1) break;
switch (option) {
/* I/O */
case 'i': // input
parameters.name_input_file = optarg;
break;
case 200: // mmap-input (flag is recorded but the feature is unimplemented)
parameters.mmap_input = true;
gt_fatal_error(NOT_IMPLEMENTED);
break;
case 'r': // reference
parameters.name_reference_file = optarg;
gt_fatal_error(NOT_IMPLEMENTED);
break;
case 'I': // gem-index
gt_fatal_error(NOT_IMPLEMENTED);
break;
case 'p': // paired-end
parameters.paired_end = true;
break;
case 'n': // num-reads (overrides the counted number of reads)
parameters.num_reads = atol(optarg);
break;
case 'o': // output
parameters.name_output_file = optarg;
break;
case 'f': // output format: report (default), json, or both
if(strcmp("report", optarg) == 0){
parameters.print_json = false;
}else if(strcmp("json", optarg) == 0){
parameters.print_json = true;
}else if(strcmp("both", optarg) == 0){
parameters.print_json = true;
parameters.print_both = true;
}else{
gt_fatal_error_msg("Unknown format %s, supported formats are 'report' or 'json' or 'both'", optarg);
}
break;
/* Analysis */
case 300: // first-map
parameters.first_map = true;
break;
case 'a': // all-tests: shorthand enabling every optional profile
parameters.maps_profile = true;
parameters.mismatch_transitions = true;
parameters.mismatch_quality = true;
parameters.splitmaps_profile = true;
parameters.population_profile = true;
break;
case 'M': // maps-profile
parameters.maps_profile = true;
break;
case 'T': // mismatch-transitions
parameters.mismatch_transitions = true;
break;
case 'Q': // mismatch-quality
parameters.mismatch_quality = true;
break;
case 'R': // rna-profile // FIXME name
parameters.splitmaps_profile = true;
break;
case 'P': // population-profile
parameters.population_profile = true;
break;
case 'D': // indel-profile (unimplemented; the assignment below is dead code)
gt_fatal_error(NOT_IMPLEMENTED);
parameters.indel_profile = true;
break;
/* MAP Specific */
case 400: // use-only-decoded-maps
parameters.use_only_decoded_maps = true;
break;
/* Misc */
case 't': // threads (ignored when built without OpenMP)
#ifdef HAVE_OPENMP
parameters.num_threads = atol(optarg);
#endif
break;
case 'v':
parameters.verbose = true;
break;
case 'h': // short help, then exit
fprintf(stderr, "USE: ./gt.stats [ARGS]...\n");
gt_options_fprint_menu(stderr,gt_stats_options,gt_stats_groups,false,false);
exit(1);
case 'H': // extended help, then exit
fprintf(stderr, "USE: ./gt.stats [ARGS]...\n");
gt_options_fprint_menu(stderr,gt_stats_options,gt_stats_groups,false,true);
exit(1);
case 'J': // JSON option menu, then exit
gt_options_fprint_json_menu(stderr,gt_stats_options,gt_stats_groups,false,true);
exit(1);
break; // NOTE(review): unreachable after exit(1)
case '?':
default:
gt_fatal_error_msg("Option not recognized");
}
}
/*
 * Checks
 * NOTE(review): gt_error_msg is non-fatal here, so execution continues
 * even though the indel profile cannot be generated — confirm intent
 */
if (parameters.indel_profile && parameters.name_reference_file==NULL) {
gt_error_msg("To generate the indel-profile, a reference file(.fa/.fasta) or GEMindex(.gem) is required");
}
// Free
gt_string_delete(gt_stats_short_getopt);
}
/*
 * Entry point: installs the GT signal handlers, parses the command line,
 * resolves the output streams (named file or stdout for the report;
 * the same stream or stderr for JSON), runs the parallel stats pass,
 * and closes the output file if one was opened.
 */
int main(int argc,char** argv) {
gt_handle_error_signals();
parse_arguments(argc,argv);
// Resolve report output: a named file replaces stdout
const bool to_file = (parameters.name_output_file != NULL);
parameters.output_file = to_file ? fopen(parameters.name_output_file, "w") : stdout;
if (to_file) {
gt_cond_fatal_error(parameters.output_file==NULL,FILE_OPEN,parameters.name_output_file);
}
// JSON shares the report stream unless both formats were requested,
// in which case it goes to stderr
parameters.output_file_json =
(parameters.print_json && !parameters.print_both) ? parameters.output_file : stderr;
// Compute and print the statistics
gt_stats_parallel_generate_stats();
if (to_file) {
fclose(parameters.output_file);
}
return 0;
}
|
graph.c | /*
Copyright (c) 2014, Mohammad Moghadasi
Division of System Engineering, Boston University
Structural Bioinformatics Laboratory, Boston University
All rights reserved.
E-mail: <mohamad@bu.edu>
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the author nor the names of its contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <libgen.h>
#include <stdarg.h>
#include <stdint.h>
#include _MOL_INCLUDE_
#include "main.h"
#include "parameters.h"
#include "rotamer_placement.h"
#include "pack.h"
#include "mwis.h"
#include "graph.h"
//________________________________________________________________________
// graph.c
//________________________________________________________________________
/*
 * Accumulates into *energy the pairwise hydrogen-bond energies between the
 * atoms indexed by list1 and those indexed by list2 (cutoff distance rc,
 * squared once up front). Atoms that are neither donatable hydrogens nor
 * acceptors are skipped; for each valid pair the donor hydrogen is always
 * passed first to get_pairwise_hbondeng_nblist.
 */
static void hbondeng_list( struct atomgrp *ag, double *energy, int *list1, int numOfAtomsList1, int *list2, int numOfAtomsList2 , double rc)
{
const double rc2 = rc * rc;
*energy = 0;
for (int n1 = 0; n1 < numOfAtomsList1; n1++) {
const int ai = list1[n1];
mol_atom *atom_i = &(ag->atoms[ai]);
const int i_is_donor_h = (atom_i->hprop & DONATABLE_HYDROGEN) != 0;
// Atoms with no H-bond capability contribute nothing
if (!i_is_donor_h && !(atom_i->hprop & HBOND_ACCEPTOR)) continue;
for (int n2 = 0; n2 < numOfAtomsList2; n2++) {
const int aj = list2[n2];
mol_atom *atom_j = &(ag->atoms[aj]);
if (i_is_donor_h) {
// atom_i donates: the partner must be an acceptor
if (atom_j->hprop & HBOND_ACCEPTOR) {
(*energy) += get_pairwise_hbondeng_nblist( ag->atoms, ai, ag->atoms, aj, NULL, rc2, 1, 1.0 );
}
} else {
// atom_i accepts: the partner must be a donatable hydrogen
if (atom_j->hprop & DONATABLE_HYDROGEN) {
(*energy) += get_pairwise_hbondeng_nblist( ag->atoms, aj, ag->atoms, ai, NULL, rc2, 1, 1.0 );
}
}
}
}
}
/*
 * Collects the indices of every atom that belongs to any residue other
 * than 'resi_num'. ag->iares[res] .. ag->iares[res+1]-1 are the atom
 * indices of residue 'res'. noRes_list must be able to hold all atoms of
 * ag; natoms_noRes[0] receives the number of indices written.
 */
static void get_list_noRes_atoms( struct atomgrp *ag , int resi_num , int* noRes_list , int* natoms_noRes)
{
int count = 0;
for (int res = 0; res < ag->nres; res++) {
if (res == resi_num) continue; // leave out the excluded residue
const int res_first = ag->iares[res];
const int res_nat = ag->iares[res+1] - res_first;
for (int k = 0; k < res_nat; k++)
noRes_list[count + k] = res_first + k;
count += res_nat;
}
natoms_noRes[0] = count;
}
/*
 * Collects the indices of all atoms belonging to residues that are NOT in
 * the interface residue list. ag->iares[res] .. ag->iares[res+1]-1 are
 * the atom indices of residue 'res'. noIF_list must be able to hold every
 * atom of ag (callers size it with ag->natoms); natoms_noIF[0] receives
 * the number of indices written.
 * Idiom fix: the membership scan now exits with 'break' instead of
 * forcing the loop counter past its bound (j = nIFres).
 */
static void get_list_noIF_atoms( struct atomgrp *ag , struct ifres_list* res_list , int* noIF_list , int* natoms_noIF)
{
const int nresi = ag->nres;
const int nIFres = res_list->num_of_ifres;
int ind = 0;
for (int res = 0; res < nresi; res++) {
// Skip residues that appear in the interface list
int is_interface = 0;
for (int j = 0; j < nIFres; j++) {
if (res == res_list->ifres_num[j]) {
is_interface = 1;
break;
}
}
if (is_interface) continue;
// Append every atom index of this non-interface residue
const int first_at = ag->iares[res];
const int nat = ag->iares[res+1] - first_at;
for (int k = 0; k < nat; k++)
noIF_list[ind + k] = first_at + k;
ind += nat;
}
natoms_noIF[0] = ind;
}
/*
 * Allocates a graph_t sized for up to MAX_NODE vertices and N_CLIQUES
 * cliques and stores it in *graph. All buffers come from _mol_malloc;
 * release them with Free_graph_t using the same MAX_NODE/N_CLIQUES.
 */
void graph_t_malloc( struct graph_t** graph , int MAX_NODE , int N_CLIQUES)
{
struct graph_t* g = (struct graph_t*) _mol_malloc (sizeof (struct graph_t));
// Per-vertex weights and the dense MAX_NODE x MAX_NODE adjacency matrix
g->weight = (double*) _mol_malloc(MAX_NODE * sizeof(double));
g->adjacency_matrix = (unsigned char*) _mol_malloc(MAX_NODE * MAX_NODE * sizeof(unsigned char));
// Clique membership: the node list and size of each clique ...
g->clique_list = (int**) _mol_malloc(N_CLIQUES * sizeof(int*));
for (int c = 0; c < N_CLIQUES; c++)
g->clique_list[c] = (int*) _mol_malloc(MAX_NODE * sizeof(int));
g->clique_size = (int*) _mol_malloc(N_CLIQUES * sizeof(int));
// ... and the inverse mapping: the cliques each node belongs to
g->cliquesof_i = (int**) _mol_malloc(MAX_NODE * sizeof(int*));
for (int v = 0; v < MAX_NODE; v++)
g->cliquesof_i[v] = (int*) _mol_malloc(N_CLIQUES * sizeof(int));
g->node_nClq = (int*) _mol_malloc(MAX_NODE * sizeof(int));
*graph = g;
}
/*
 * Re-maps the vertex weights: after finding the largest and smallest
 * weight, every weight w is replaced by (max + 5) - w, so smaller input
 * values become larger positive weights. Side effect: the globals LARGE
 * (max + 5) and MINIM (min) are updated.
 */
void weight_edit(struct graph_t* graph)
{
LARGE = graph->weight[0];
MINIM = graph->weight[0];
// Single pass to find both extremes (index 0 already seeds them)
for (int v = 1; v < graph->vertex_count; v++) {
const double w = graph->weight[v];
if (w > LARGE) LARGE = w;
if (w < MINIM) MINIM = w;
}
LARGE += 5;
// printf("Large = %f Min = %f\n", LARGE, MINIM);
for (int v = 0; v < graph->vertex_count; v++)
graph->weight[v] = LARGE - graph->weight[v];
}
/*
 * Releases every buffer owned by *graph and then the struct itself.
 * MAX_NODE and N_CLIQUES must match the values passed to graph_t_malloc.
 * BUG FIX: the previous version called free(*graph) before walking
 * (*graph)->clique_list / clique_size / cliquesof_i / node_nClq, reading
 * all of those members through a dangling pointer (use-after-free).
 * The member arrays are now freed first and the struct last.
 */
void Free_graph_t( struct graph_t** graph , int MAX_NODE, int N_CLIQUES )
{
free((*graph)->adjacency_matrix);
free((*graph)->weight);
for(int i = 0 ; i < N_CLIQUES ; i++)
free((*graph)->clique_list[i]);
free((*graph)->clique_list);
free((*graph)->clique_size);
for(int i = 0 ; i < MAX_NODE ; i++)
free((*graph)->cliquesof_i[i]);
free((*graph)->cliquesof_i);
free((*graph)->node_nClq);
// Only now is it safe to release the struct itself
free(*graph);
}
//
/*
 * Fills the pairwise rotamer energy table ('energy') and the feasibility
 * link list ('ln_list') for all interface residues. For each residue i and
 * rotamer ri it computes a self term (vdW against all non-interface atoms
 * plus a probability term), and for each later residue j and rotamer rj a
 * pair term (vdW + H-bond between the two residues' atom lists). A link is
 * set (1) when the energy is below big_eng, or always for rotamer index 0
 * — per the in-code comments, index 0 restores the input conformation.
 * NOTE(review): 'ags' is allocated and freed but never initialized or used
 * (init_nblst/update_nblst are commented out) — dead allocation; confirm.
 * NOTE(review): list1/list2 hold at most 20 atom indices — assumes
 * get_list never returns more than 20 atoms per residue; confirm.
 */
void eng_linklist_gen( struct atomgrp *RecLigAg , struct ifres_list* res_list , struct node_energy* energy , struct link_list* ln_list , struct rot_info* rotinf[])
{
int resi; // residue id for i
int resj; // residue id for j
int list1[20];
int list2[20];
int list1_size;
int list2_size;
struct agsetup* ags;
ags=malloc(sizeof(struct agsetup));
// init_nblst(RecLigAg , ags);
// update_nblst(RecLigAg , ags);
// List of non-Interface atoms in the backbone for self energy purposes
int noIF_list[RecLigAg->natoms];
int natoms_noIF;
get_list_noIF_atoms ( RecLigAg , res_list , noIF_list , &natoms_noIF);
int noRes_list[RecLigAg->natoms];
int natoms_noRes;
// Initializing the Graph Structs: zero every rotamer-pair entry
// #pragma omp parallel for
for( int i = 0 ; i < res_list->num_of_ifres ; i++ ){
for(int j = 0 ; j < res_list->num_of_ifres ; j++){
for( int ri = 0 ; ri < rotinf[i]->nrotamers + 1 ; ri++){
for( int rj = 0 ; rj < rotinf[j]->nrotamers + 1 ; rj++){
energy->residues[i][j].rotamers[ri][rj] = 0 ;
ln_list->residues[i][j].rotamers[ri][rj] = 0 ;
}
}
}
}
energy->nresidue = res_list->num_of_ifres ;
ln_list->nresidue = res_list->num_of_ifres ;
// Filling the Graph Structs
double noIF_eng;
double inter_eng;
double hbond_eng;
double rc = 5; // distance cutoff for vdW and H-bond terms
double prob_eng;
double prob1 ;
for( int i = 0 ; i < res_list->num_of_ifres ; i++ ){
energy->nrotamers[i] = rotinf[i]->nrotamers ;
ln_list->nrotamers[i] = rotinf[i]->nrotamers ;
resi = res_list->ifres_num[i];
get_list( RecLigAg, resi , list1 , &list1_size);
get_list_noRes_atoms ( RecLigAg , resi , noRes_list , &natoms_noRes);
for( int ri = 0 ; ri < rotinf[i]->nrotamers + 1 ; ri++){
apply_rot_to_atmgrp( RecLigAg, rotinf[i], resi , ri ); // +1 : since rotamer indices starts at 1 IMP ***
// Self term: vdW of residue i against all non-interface atoms
vdweng_inter( RecLigAg, rc, list1, list1_size, noIF_list, natoms_noIF, &noIF_eng);
// Probability term: -KT * log(p / P0)
prob1 = rotinf[i]->rot[ri].probability;
prob_eng = -1 * KT * log(prob1/P0);
energy->residues[i][i].rotamers[ri][ri]= vdw_noIF_coef*noIF_eng + prob1_coef*prob_eng; //+vdw_self_coef*self_eng;
// Only rotamers with acceptable self energy (or index 0) are linked
// and considered for pair terms
if( (energy->residues[i][i].rotamers[ri][ri] < big_eng) || (ri == 0) ){
ln_list->residues[i][i].rotamers[ri][ri] = 1 ;
for(int j = i+1 ; j < res_list->num_of_ifres ; j++){
resj = res_list->ifres_num[j];
get_list( RecLigAg, resj , list2 , &list2_size);
for( int rj = 0 ; rj < rotinf[j]->nrotamers + 1 ; rj++){
apply_rot_to_atmgrp( RecLigAg, rotinf[j], resj , rj );// +1 since rot indices starts at 1
// Pair term: vdW + H-bond between residue i and residue j atoms
vdweng_inter( RecLigAg, rc, list1, list1_size, list2, list2_size, &inter_eng);
hbond_eng = 0 ;
hbondeng_list( RecLigAg , &hbond_eng, list1, list1_size, list2, list2_size , rc);
energy->residues[i][j].rotamers[ri][rj]= vdw_inter_coef * inter_eng + hbond_coef * hbond_eng;
// Keep the table symmetric
energy->residues[j][i].rotamers[rj][ri]= energy->residues[i][j].rotamers[ri][rj];
if( (energy->residues[i][j].rotamers[ri][rj] < big_eng) || (ri == 0) || (rj == 0) ) {
ln_list->residues[i][j].rotamers[ri][rj] = 1 ;
ln_list->residues[j][i].rotamers[rj][ri] = 1 ;
}
}
// Restore residue j to its input conformation (rotamer 0)
apply_rot_to_atmgrp( RecLigAg, rotinf[j], resj , 0 );
}
}
}
// Restore residue i to its input conformation (rotamer 0)
apply_rot_to_atmgrp( RecLigAg, rotinf[i], resi , 0 );
}
free(ags);
}
//================================
/*
 * Counts the graph nodes that will be created from the feasibility link
 * list: one node per linked residue/rotamer pair whose self links are also
 * set. With exactly two interface residues (the "new" graph, triple-to-one
 * encoding) only proper pairs (j > i) are used; otherwise (the "old"
 * graph) the self pairs (j == i) are counted as well. Writes the count to
 * *nnodes.
 * Refactor: the two previously duplicated loop nests differed only in the
 * inner-loop start index, so they are folded into one with a start offset
 * — behavior is unchanged.
 */
void node_counter( struct ifres_list* res_list , struct link_list* ln_list , struct rot_info* rotinf[], int* nnodes)
{
// j starts at i+1 for the two-residue graph, at i otherwise
const int j_offset = (res_list->num_of_ifres == 2) ? 1 : 0;
int node_cnt = 0 ;
for( int i = 0 ; i < res_list->num_of_ifres ; i++ ){
for(int j = i + j_offset ; j < res_list->num_of_ifres ; j++){
for( int ri = 0 ; ri < rotinf[i]->nrotamers+1 ; ri++){
for( int rj = 0 ; rj < rotinf[j]->nrotamers+1 ; rj++){
// A node exists only when the pair link AND both self links are set
if( ln_list->residues[i][j].rotamers[ri][rj] == 1 && ln_list->residues[i][i].rotamers[ri][ri] == 1 && ln_list->residues[j][j].rotamers[rj][rj] == 1){
node_cnt++;
}
}
}
}
}
*nnodes = node_cnt;
}
//================================
// Build the weighted rotamer-compatibility graph over the surviving rotamer
// pairs recorded in ln_list. Each vertex is a (residue i rotamer ri,
// residue j rotamer rj) combination; node_info[v] = {i, j, ri, rj}.
// `nnodes` must match the count produced by node_counter() on the same input,
// since the VLAs below are sized from it.
void graph_construction( struct graph_t* graph , struct ifres_list* res_list , struct link_list* ln_list , struct node_energy* energy , struct rot_info* rotinf[] , int node_info[][4] , int nnodes)
{
    graph->vertex_count = nnodes ;
    // One clique per residue pair (i <= j): N self pairs + N*(N-1)/2 cross pairs.
    graph->ncliques = res_list->num_of_ifres + res_list->num_of_ifres*(res_list->num_of_ifres - 1)/2;
    int node_ind = 0 ;
    int clique_ind = -1 ;
    int clqnod_ind[graph->ncliques];   // nodes assigned so far to each clique
    int nodescliq_ind[nnodes]; // nodescliq_ind[i] : index for the cliques of node i
    if( res_list->num_of_ifres == 2 ){ // Two IF residues : New Version of Graph :: Triple to One
    // #pragma omp parallel for
        for( int i = 0 ; i < res_list->num_of_ifres ; i++ ){
            for(int j = i+1 ; j < res_list->num_of_ifres ; j++){ // New Graph
                // Rotamer slots run 0..nrotamers inclusive (slot 0 appears to
                // be the reference conformation — TODO confirm upstream).
                for( int ri = 0 ; ri < rotinf[i]->nrotamers+1 ; ri++){
                    for( int rj = 0 ; rj < rotinf[j]->nrotamers+1 ; rj++){
                        // Keep the pair only if the pair link and both self links survived.
                        if( ln_list->residues[i][j].rotamers[ri][rj] == 1 && ln_list->residues[i][i].rotamers[ri][ri] == 1
                            && ln_list->residues[j][j].rotamers[rj][rj] == 1){
                            // Vertex weight folds both self energies and the pair energy
                            // into one number ("Triple to One").
                            graph->weight[node_ind] = energy->residues[i][i].rotamers[ri][ri]
                                + energy->residues[j][j].rotamers[rj][rj]
                                + energy->residues[i][j].rotamers[ri][rj];
                            node_info[node_ind][0] = i;
                            node_info[node_ind][1] = j;
                            node_info[node_ind][2] = ri;
                            node_info[node_ind][3] = rj;
                            node_ind++;
                        }
                    }
                }
            }
        }
    }
    else { // More than Two IF residues : Old Version of Graph
    // #pragma omp parallel for
        for(int i = 0 ; i < nnodes ; i++) nodescliq_ind[i] = 0;
    // #pragma omp parallel for
        for( int i = 0 ; i < res_list->num_of_ifres ; i++ ){
            for(int j = i ; j < res_list->num_of_ifres ; j++){ // Old Graph
                clique_ind++;
                clqnod_ind[clique_ind] = 0;
                for( int ri = 0 ; ri < rotinf[i]->nrotamers+1 ; ri++){
                    for( int rj = 0 ; rj < rotinf[j]->nrotamers+1 ; rj++){
                        if( ln_list->residues[i][j].rotamers[ri][rj] == 1 && ln_list->residues[i][i].rotamers[ri][ri] == 1
                            && ln_list->residues[j][j].rotamers[rj][rj] == 1){
                            // Here the weight is only the (i,j) pair energy; self
                            // energies live on the diagonal cliques where i == j.
                            graph->weight[node_ind] = energy->residues[i][j].rotamers[ri][rj] ;
                            node_info[node_ind][0] = i;
                            node_info[node_ind][1] = j;
                            node_info[node_ind][2] = ri;
                            node_info[node_ind][3] = rj;
                            // Adding Clique Info. of K-cliques when K > 2
                            graph->clique_list[clique_ind][clqnod_ind[clique_ind]] = node_ind;
                            graph->cliquesof_i[node_ind][nodescliq_ind[node_ind]] = clique_ind;
                            nodescliq_ind[node_ind]++;
                            node_ind++;
                            clqnod_ind[clique_ind]++;
                        }
                    }
                }
            }
        }
        for(int i = 0 ; i < graph->ncliques ; i++)
            graph->clique_size[i] = clqnod_ind[i];
        for(int i = 0 ; i < nnodes ; i++)
            graph->node_nClq[i] = nodescliq_ind[i];
    }
    // adjacency_matrix generation
    // Two vertices conflict (edge = 1) when they assign different rotamers to
    // a residue they share; the bitwise &/| operate on 0/1 comparison results.
    // #pragma omp parallel for
    for(unsigned int i = 0 ; i < graph->vertex_count ; i++){
        graph_edge(graph, i, i) = 0;
        for(unsigned int j = i+1 ; j < graph->vertex_count ; j++){
            if( ( (node_info[i][0] == node_info[j][0]) & (node_info[i][2] != node_info[j][2]) ) |
                ( (node_info[i][1] == node_info[j][1]) & (node_info[i][3] != node_info[j][3]) ) |
                ( (node_info[i][0] == node_info[j][1]) & (node_info[i][2] != node_info[j][3]) ) |
                ( (node_info[i][1] == node_info[j][0]) & (node_info[i][3] != node_info[j][2]))) {
                graph_edge(graph, i, j) = 1;
                graph_edge(graph, j, i) = 1;
            }
            else{
                graph_edge(graph, i, j) = 0;
                graph_edge(graph, j, i) = 0;
            }
        }
    }
}
//=====================================
// Append every edge of the adjacency matrix to the clique tables as a
// 2-clique, extending graph->ncliques and registering the new clique on
// both of its endpoint vertices.
void add_2cliques_to_graph( struct graph_t* graph)
{
    int next = graph->ncliques;
    for (int a = 0; a < graph->vertex_count; a++) {
        for (int b = a + 1; b < graph->vertex_count; b++) {
            if (!graph_edge(graph, a, b))
                continue;
            graph->clique_list[next][0] = a;
            graph->clique_list[next][1] = b;
            graph->clique_size[next] = 2;
            // Record clique membership on both endpoints.
            graph->cliquesof_i[a][graph->node_nClq[a]] = next;
            graph->cliquesof_i[b][graph->node_nClq[b]] = next;
            graph->node_nClq[a]++;
            graph->node_nClq[b]++;
            next++;
        }
    }
    graph->ncliques = next;
}
//=====================================
//________________________________________________________________________
// dee.c
//________________________________________________________________________
#ifndef minimum
#define minimum( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
// Dead-End Elimination: prune rotamers that cannot belong to the global
// minimum-energy configuration. A rotamer i1 of residue r is eliminated when
// its best achievable total energy is still worse than the worst achievable
// total energy of some competing rotamer i2 of the same residue.
void dee( struct node_energy* energy , struct link_list* ln_list )
{
    // Evaluating all the Best & Worst scenarios for all rotamers of all residues
    float best[energy->nresidue][MAX_ROT];
    float worst[energy->nresidue][MAX_ROT];
    float MAX;
    float MIN;
    for( int r = 0 ; r < energy->nresidue ; r++ ){
        for( int i = 0 ; i <= energy->nrotamers[r] ; i++){ // r_i : residue r * rotamer i
            best[r][i] = 0;
            worst[r][i] = 0;
            // The self energy contributes identically to both bounds.
            best[r][i] += energy->residues[r][r].rotamers[i][i];
            worst[r][i] += energy->residues[r][r].rotamers[i][i];
            for ( int s = 0 ; s < energy->nresidue ; s++ ){
                if( s != r){
                    MAX = energy->residues[r][s].rotamers[i][0];
                    MIN = energy->residues[r][s].rotamers[i][0];
                    for( int j = 0 ; j <= energy->nrotamers[s] ; j++){ // s_j : residue s * rotamer j
                        MAX = (energy->residues[r][s].rotamers[i][j]>MAX) ? energy->residues[r][s].rotamers[i][j]:MAX;
                        MIN = (energy->residues[r][s].rotamers[i][j]<MIN) ? energy->residues[r][s].rotamers[i][j]:MIN;
                    }
                    // Best case: every partner residue picks its friendliest
                    // rotamer; worst case: the most hostile one.
                    best[r][i] += MIN ;
                    worst[r][i] += MAX ;
                }
            }
        }
    }
    // Elimination Part
    for( int r = 0 ; r < energy->nresidue ; r++ ){
        for( int i1 = 0 ; i1 <= energy->nrotamers[r] ; i1++){
            for( int i2 = 0 ; i2 <= energy->nrotamers[r] ; i2++ ){
                // Slot 0 (i1 == 0) is never eliminated — it appears to be the
                // reference conformation; TODO confirm against the rotamer tables.
                if( (best[r][i1] > worst[r][i2]) && (i1 != 0) ) { // rotamer i2 beats rotamer i1
                    ln_list->residues[r][r].rotamers[i1][i1]=0;
                    break;
                }
            }
        }
    }
    // Propagate each elimination: an eliminated rotamer is unlinked from
    // every rotamer of every residue (including its own cross links).
    for( int r = 0 ; r < energy->nresidue ; r++ ){
        for( int i = 0 ; i <= energy->nrotamers[r] ; i++){
            if( ln_list->residues[r][r].rotamers[i][i] == 0){
                for( int s = 0 ; s < energy->nresidue ; s++ ){
                    for( int j = 0 ; j <= energy->nrotamers[s] ; j++){
                        ln_list->residues[r][s].rotamers[i][j] = 0;
                    }
                }
            }
        }
    }
}
// Memory allocation for node_energy: a MAX_RES1 x MAX_RES1 matrix of residue
// pairs, each holding a MAX_ROT x MAX_ROT rotamer-pair energy matrix, plus a
// per-residue rotamer-count array. Mirrors link_list_malloc(); release with
// Free_node_energy().
void node_energy_malloc( struct node_energy** energy , int MAX_RES1 ){ // NEW
    *energy = (struct node_energy*) _mol_malloc (sizeof (struct node_energy ));
    (*energy)->residues = (struct eng_residue**) _mol_malloc ( MAX_RES1 * sizeof (struct eng_residue*));
    for(int i = 0 ; i < MAX_RES1 ; i++)
        (*energy)->residues[i] = (struct eng_residue*) _mol_malloc ( MAX_RES1 * sizeof (struct eng_residue));
    for(int i = 0 ; i < MAX_RES1 ; i++)
        for(int j = 0 ; j < MAX_RES1 ; j++)
            (*energy)->residues[i][j].rotamers = (double**) _mol_malloc( MAX_ROT * sizeof(double*) );
    for(int i = 0 ; i < MAX_RES1 ; i++)
        for(int j = 0 ; j < MAX_RES1 ; j++)
            for(int k = 0 ; k < MAX_ROT ; k++)
                (*energy)->residues[i][j].rotamers[k] = (double*) _mol_malloc( MAX_ROT * sizeof(double) );
    // FIX: use the project's checked allocator here too — plain malloc() was
    // inconsistent with every other allocation above and its result unchecked.
    (*energy)->nrotamers = (int*) _mol_malloc ( MAX_RES1 * sizeof(int) );
}
// Release everything node_energy_malloc() allocated, deepest level first.
// *energy is left dangling; callers must not reuse it.
void Free_node_energy( struct node_energy** energy , int MAX_RES1 )
{
    free( (*energy)->nrotamers );
    for (int r = 0 ; r < MAX_RES1 ; r++) {
        for (int c = 0 ; c < MAX_RES1 ; c++) {
            double **rot = (*energy)->residues[r][c].rotamers;
            for (int k = 0 ; k < MAX_ROT ; k++)
                free( rot[k] );
            free( rot );
        }
        free( (*energy)->residues[r] );
    }
    free( (*energy)->residues );
    free( *energy );
}
// Memory allocation for link_list: a MAX_RES1 x MAX_RES1 matrix of residue
// pairs, each holding a MAX_ROT x MAX_ROT integer rotamer-link matrix, plus a
// per-residue rotamer-count array. Mirrors node_energy_malloc(); release with
// Free_link_list().
void link_list_malloc( struct link_list** ln_list , int MAX_RES1 ) // NEW
{
    *ln_list = (struct link_list*) _mol_malloc (sizeof (struct link_list));
    (*ln_list)->residues = (struct rotamer_pair**) _mol_malloc ( MAX_RES1 * sizeof (struct rotamer_pair*));
    for(int i = 0 ; i < MAX_RES1 ; i++)
        (*ln_list)->residues[i] = (struct rotamer_pair*) _mol_malloc ( MAX_RES1 * sizeof (struct rotamer_pair));
    for(int i = 0 ; i < MAX_RES1 ; i++)
        for(int j = 0 ; j < MAX_RES1 ; j++)
            (*ln_list)->residues[i][j].rotamers = (int**) _mol_malloc (MAX_ROT * sizeof(int*) );
    for(int i = 0 ; i < MAX_RES1 ; i++)
        for(int j = 0 ; j < MAX_RES1 ; j++)
            for(int k = 0 ; k < MAX_ROT ; k++)
                (*ln_list)->residues[i][j].rotamers[k] = (int*) _mol_malloc (MAX_ROT * sizeof(int));
    // FIX: use the project's checked allocator here too — plain malloc() was
    // inconsistent with every other allocation above and its result unchecked.
    (*ln_list)->nrotamers = (int*) _mol_malloc ( MAX_RES1 * sizeof(int) );
}
//
// Release everything link_list_malloc() allocated, deepest level first.
// *ln_list is left dangling; callers must not reuse it.
void Free_link_list ( struct link_list** ln_list , int MAX_RES1 )
{
    free( (*ln_list)->nrotamers );
    for (int r = 0 ; r < MAX_RES1 ; r++) {
        for (int c = 0 ; c < MAX_RES1 ; c++) {
            int **rot = (*ln_list)->residues[r][c].rotamers;
            for (int k = 0 ; k < MAX_ROT ; k++)
                free( rot[k] );
            free( rot );
        }
        free( (*ln_list)->residues[r] );
    }
    free( (*ln_list)->residues );
    free( *ln_list );
}
// vdweng_list.c %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#define CUTOFF_LINE_SCALE 0.6
#define CUTOFF_REP_SCALE 0.89
// Soft-core van der Waals interaction energy between two atom lists of the
// same atom group, with distance cutoff rc. The shifted 12-6 potential is
// evaluated piecewise: linearized below CUTOFF_LINE_SCALE*rij (to tame the
// r^-12 singularity), repulsive up to CUTOFF_REP_SCALE*rij, attractive out to
// the cutoff. Repulsive and attractive parts are accumulated separately and
// their sum is written through *eng.
void vdweng_inter(struct atomgrp *ag, double rc, int *list1, int numOfAtomsList1, int *list2, int numOfAtomsList2, double *eng){
    int i, i1, j, i2;
    double ei, ri, ej, rj, x1, y1, z1, dx, dy, dz, eij;
    double d2, Rd12, Rd6, Rr12, Rr6, dr6;
    double rij,rij2;
    struct atom *a1, *a2;
    double rc2=rc*rc;
    double a,b,d;
    double cutoff_line_2, cutoff_rep_2;
    double cutoff_rep_scale_2;
    double cutoff_line_scale_2, cutoff_line_scale_6, cutoff_line_scale_min_6, cutoff_line_scale_min_12;
    // FIX: the two accumulators were heap-allocated (malloc of one double
    // each, freed at the end) for no reason; plain locals are equivalent,
    // faster, and cannot leak.
    double eng_rep = 0;
    double eng_atr = 0;
    (*eng) = 0 ;
    cutoff_rep_scale_2 = CUTOFF_REP_SCALE*CUTOFF_REP_SCALE;
    cutoff_line_scale_2 = CUTOFF_LINE_SCALE*CUTOFF_LINE_SCALE;
    cutoff_line_scale_6 = pow(CUTOFF_LINE_SCALE,6);
    cutoff_line_scale_min_6 = pow(CUTOFF_LINE_SCALE,-6);
    cutoff_line_scale_min_12 = pow(CUTOFF_LINE_SCALE,-12);
    for(i=0;i<numOfAtomsList1;i++){
        i1=list1[i];
        a1=&(ag->atoms[i1]);
        ei=a1->eps;
        ri=a1->rminh;
        x1=ag->atoms[i1].X;
        y1=ag->atoms[i1].Y;
        z1=ag->atoms[i1].Z;
        for(j=0;j<numOfAtomsList2;j++){
            i2=list2[j];
            a2=&(ag->atoms[i2]);
            rj=a2->rminh;
            ej=a2->eps;
            dx=x1-ag->atoms[i2].X;
            dy=y1-ag->atoms[i2].Y;
            dz=z1-ag->atoms[i2].Z;
            d2=dx*dx+dy*dy+dz*dz;
            if(d2<rc2){
                eij = ei*ej;
                rij = ri+rj;
                rij2 = rij*rij;//artem modification for rij
                cutoff_line_2 = cutoff_line_scale_2*rij2;
                cutoff_rep_2 = cutoff_rep_scale_2*rij2;
                if(d2<cutoff_line_2){
                    // Very short range: linear extrapolation from the value
                    // (a) and slope (b) of the shifted potential at the
                    // matching point d = CUTOFF_LINE_SCALE*rij.
                    Rr6=rij2/rc2;
                    Rr6=Rr6*Rr6*Rr6;
                    Rr12=Rr6*Rr6;
                    d = sqrt(d2);
                    a = cutoff_line_scale_min_12 - 2*cutoff_line_scale_min_6 + Rr6*(4.0-2*cutoff_line_scale_6*Rr6) + Rr12*(2*cutoff_line_scale_6*Rr6-3.0);
                    b = 12.0*(-cutoff_line_scale_min_12 + cutoff_line_scale_min_6 + cutoff_line_scale_6*Rr6*(Rr12-Rr6))/(CUTOFF_LINE_SCALE*rij);
                    eng_rep += eij*(a + (d - CUTOFF_LINE_SCALE*rij)*b);
                }else if(d2<cutoff_rep_2){
                    // Repulsive region of the shifted 12-6 potential.
                    Rd6=rij2/d2;
                    Rd6=Rd6*Rd6*Rd6;
                    Rd12=Rd6*Rd6;
                    Rr6=rij2/rc2;
                    Rr6=Rr6*Rr6*Rr6;
                    Rr12=Rr6*Rr6;
                    dr6=d2/rc2;
                    dr6=dr6*dr6*dr6;
                    eng_rep += eij*(Rd12 - 2*Rd6 + Rr6*(4.0-2*dr6) + Rr12*(2*dr6-3.0));
                }else{
                    // Attractive tail out to the cutoff (same expression,
                    // accumulated separately).
                    Rd6=rij2/d2;
                    Rd6=Rd6*Rd6*Rd6;
                    Rd12=Rd6*Rd6;
                    Rr6=rij2/rc2;
                    Rr6=Rr6*Rr6*Rr6;
                    Rr12=Rr6*Rr6;
                    dr6=d2/rc2;
                    dr6=dr6*dr6*dr6;
                    eng_atr += eij*(Rd12 - 2*Rd6 + Rr6*(4.0-2*dr6) + Rr12*(2*dr6-3.0));
                }
            }
        }
    }
    *eng = eng_rep + eng_atr;
}
// Fill `list` with the indices of all atoms belonging to residue `resid`
// (the half-open range ag->iares[resid] .. ag->iares[resid+1]) and store the
// atom count through list_size. The caller provides a large-enough buffer.
void get_list( struct atomgrp *ag , int resid , int* list, int* list_size)
{
    const int begin = ag->iares[resid];
    const int end = ag->iares[resid+1];
    *list_size = end - begin;
    for (int at = begin ; at < end ; at++)
        list[at - begin] = at;
}
//
|
compiled.c | /*
Differential evolution MCMC stepper.
*/
#define _GNU_SOURCE // sincos isn't standard?
#include <math.h>
#include <stdlib.h>
#include <stdio.h> // for debugging
// Random library with a separate generator for each thread of
// an OpenMP threaded program. Assumes max 64 threads. If OpenMP is
// not available, then operates single threaded.
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
// Limit to the number of threads so static thread-local data can be
// pre-allocated with the right size.
#define MAX_THREADS 64
// ==== Generator definition ====
// Uses:
// Salmon, J. K.; Moraes, M. A.; Dror, R. O.; Shaw, D. E. (2011)
// Parallel random numbers: as easy as 1, 2, 3. In Proceedings of 2011
// International Conference for High Performance Computing, Networking,
// Storage and Analysis; SC '11; ACM: New York, NY; pp. 16:1–16:12.
// doi: 10.1145/2063384.2063405
// https://www.deshawresearch.com/resources_random123.html v1.09
// may want to swap it for a different generator, and update the following
#include <Random123/threefry.h>
typedef threefry4x64_ctr_t r123_ctr_t;
typedef threefry4x64_key_t r123_key_t;
typedef threefry4x64_ukey_t r123_ukey_t;
#define r123_init threefry4x64keyinit
#define r123_next threefry4x64
#define R123_SIZE 4 // the 4 in 4x64
typedef uint64_t randint_t; // the 64 in 4x64
const randint_t R123_MAX = 18446744073709551615UL;
const double R123_TO_01 = 1.0/18446744073709551616.0;
const double R123_TO_M11 = 2.0/18446744073709551616.0;
// ==== end generator definition ====
// Per-thread counter-based RNG state: the threefry key acts as the seed and
// the counter as the position in the stream; values are generated in batches
// of R123_SIZE and cached.
typedef struct {
r123_ctr_t counter; // position in sequence
r123_key_t key; // seed
r123_ctr_t values; // cached values not yet used
int have_normal; // Have a precomputed random normal (Box-Muller makes two)
double normal; // the precomputed random normal
} Random;
Random streams[MAX_THREADS]; // Max of 64 different threads in OpenMP
// Map a raw 64-bit draw onto the open interval (0, 1); the +0.5 offset keeps
// both endpoints strictly excluded.
double u_01_open(randint_t v) {
    double centered = (double)v + 0.5;
    return centered * R123_TO_01;
}
// Map a raw 64-bit draw onto [-1, 1]: the cast to int64_t reinterprets the
// bits as signed, centering the range on zero before scaling; the +0.5 offset
// keeps the mapping symmetric.
double u_m11_closed(randint_t v) {
return ((double)((int64_t)v) + 0.5)*R123_TO_M11;
}
// Seed the calling thread's generator stream. Under OpenMP this must run in
// every thread (rand_init() broadcasts it via a parallel region). Exits the
// process if the thread id exceeds the MAX_THREADS preallocated streams.
void _rand_init(randint_t seed)
{
int thread_id = omp_get_thread_num();
Random *rng = streams + thread_id;
r123_ukey_t user_key;
r123_key_t counter; // NOTE(review): key type used for a counter value; the
                    // threefry4x64 key/ctr structs share a layout, but
                    // r123_ctr_t would be the cleaner declaration.
int k;
if (thread_id >= MAX_THREADS) {
printf("Too many threads for random number generator. Set OMP_NUM_THREADS=%d\n",
MAX_THREADS);
exit(1);
}
// Zero the key and starting counter, then place the seed in word 0.
for (k = 0; k < R123_SIZE; k++) user_key.v[k] = counter.v[k] = 0;
user_key.v[0] = seed;
// NOTE(review): with the per-thread key word below disabled, every thread
// generates the identical sequence — confirm this is intentional.
//user_key.v[1] = omp_get_thread_num();
rng->key = r123_init(user_key);
rng->counter = counter;
//printf("%d initializing %p with seed %llu and counter %llu\n", omp_get_thread_num(), rng, rng->key.v[0], rng->counter.v[0]);
rng->have_normal = 0;
}
// Seed the random stream of every OpenMP thread: the parallel region makes
// each thread call _rand_init() so its own entry in `streams` is set up.
// Without OpenMP only the single (main) thread is initialized.
void rand_init(randint_t seed)
{
#ifdef _OPENMP
#pragma omp parallel
#endif
_rand_init(seed);
}
// Return the next raw 64-bit value for the calling thread's stream.
// Values are generated in batches of R123_SIZE: counter.v[0] both indexes
// into the cached batch (mod R123_SIZE) and, when it hits a multiple of
// R123_SIZE, triggers generation of the next batch.
randint_t rand_next(void)
{
Random *rng = streams+omp_get_thread_num();
//printf("retrieving from %p with key %ld and counter %ld\n",rng, rng->key.v[0], rng->counter.v[0]);
if (rng->counter.v[0]%R123_SIZE == 0) {
// Cache exhausted (or first call): encrypt the counter under the key
// to produce R123_SIZE fresh values.
rng->values = r123_next(rng->counter, rng->key);
}
return rng->values.v[(rng->counter.v[0]++)%R123_SIZE];
}
// Standard normal deviate via the Box-Muller transform. Each transform
// yields two independent normals; one is returned immediately and the other
// is cached in the per-thread stream for the next call.
double randn(void)
{
Random *rng = &streams[omp_get_thread_num()];
if (rng->have_normal) {
// Use the cached second normal from the previous transform.
rng->have_normal = 0;
return rng->normal;
} else {
// Box-Muller transform converts two ints into two normals
// Return one now and save the other for later.
double x, y, r, arg;
arg = M_PI*u_m11_closed(rand_next());
x = sin(arg);
y = cos(arg);
// u_01_open() excludes 0, so the log() argument is never zero.
r = sqrt(-2. * log(u_01_open(rand_next())));
rng->have_normal = 1;
rng->normal = y*r;
return x*r;
}
}
// Uniform random integer in [0, range). `range` must be nonzero.
// FIX: the old `value % range` had a small bias toward low results because
// 2^64 is generally not a multiple of `range` (the TODO in the original).
// Draws falling in the incomplete cycle are now rejected:
// (randint_t)(0 - range) % range == 2^64 % range, the count of raw values
// that must be discarded so the remainder splits evenly into `range` buckets
// (the OpenBSD arc4random_uniform technique).
randint_t randint(randint_t range)
{
const randint_t threshold = (randint_t)(0 - range) % range;
while (1) {
randint_t value = rand_next();
if (value < threshold) continue; // biased region: redraw (probability < range/2^64)
return value%range;
}
}
// Uniform double in the open interval (0, 1) for the calling thread.
double randu(void)
{
return u_01_open(rand_next());
}
/* draw k unique from n objects not equal to q */
// Specialized for k << n. If n is large and k -> n then argsort on
// a random uniform draw is a better bet. If you don't want to exclude
// any numbers, set not_matching to total_num.
// Draw `num_to_draw` distinct indices from [0, total_num), all different from
// `not_matching`, into p[]. Rejection sampling, intended for k << n; there is
// no termination guard, so the caller must ensure enough distinct candidates
// exist (set not_matching to total_num to exclude nothing).
void rand_draw(int num_to_draw, int total_num, randint_t not_matching,
randint_t p[])
{
    for (int i = 0; i < num_to_draw; i++) {
        for (;;) {
            // Propose until the candidate differs from not_matching and from
            // everything drawn so far.
            int candidate = randint(total_num);
            int ok = (candidate != not_matching);
            for (int j = 0; ok && j < i; j++)
                ok = (candidate != p[j]);
            if (ok) {
                p[i] = candidate;
                break;
            }
        }
    }
}
#if 0
// ---------------------------------------------------------------------------
// Disabled stand-alone smoke test for the RNG (flip the #if to enable).
// NOTE(review): `void main` is nonstandard and the fopen()/fread() results
// are unchecked — acceptable only while this harness is compiled out.
// ---------------------------------------------------------------------------
#include <stdio.h>
#include <string.h>
#include <time.h>
// Pull a 64-bit seed from the OS entropy pool.
randint_t random_seed()
{
randint_t seed;
FILE* urandom = fopen("/dev/urandom", "r");
fread(&seed, sizeof(seed), 1, urandom);
fclose(urandom);
return seed;
}
// Exercise randint/randu/randn/rand_draw from multiple OpenMP threads;
// seed comes from argv[1] for reproducibility, else from /dev/urandom.
void main(int argc, char *argv[])
{
int j, k;
randint_t seed, draw[10];
seed = (argc == 1 ? random_seed() : atoi(argv[1]));
printf("seed: %ld\n", seed);
rand_init(seed);
printf("i randint(10):\n");
#pragma omp parallel for
for (k=0; k < 10; k++) printf("i %d %ld\n", omp_get_thread_num(), randint(10));
printf("u randu:\n");
#pragma omp parallel for
for (k=0; k < 10; k++) printf("u %d %g\n", omp_get_thread_num(), randu());
printf("n randn:\n");
#pragma omp parallel for
for (k=0; k < 10; k++) printf("n %d %g\n", omp_get_thread_num(), randn());
printf("d rand_draw(10,52,!5):\n");
#pragma omp parallel for private(draw, j)
for (k=0; k < 10; k++) {
char buf[200];
rand_draw(10, 52, 5, draw);
sprintf(buf, "d %d ", omp_get_thread_num());
for (j=0; j < 10; j++) sprintf(buf+strlen(buf), "%ld ", draw[j]);
printf("%s\n", buf);
}
}
#endif
#define _SNOOKER 0
#define _DE 1
#define _DIRECT 2
#define EPS 1e-6
#define MAX_CHAINS 20
/*
Generates offspring using METROPOLIS HASTINGS monte-carlo markov chain
The number of chains may be smaller than the population size if the
population is selected from both the current generation and the
ancestors.
*/
// Propose a new position for chain qq using one of three moves chosen by a
// uniform draw: snooker (prob snooker_rate), DE with crossover
// (prob de_rate - snooker_rate), or a plain one-pair "direct" DE step.
// Outputs: x_new[0..Nvar-1] = proposed point, step_alpha[qq] = extra
// Metropolis acceptance factor (1 except for snooker), CR_used[qq] =
// crossover ratio used (0 when not applicable).
void
_perform_step(int qq, int Nchain, int Nvar, int NCR,
              double pop[], double CR[][2],
              int max_pairs, double eps,
              double snooker_rate, double de_rate, double noise, double scale,
              double x_new[], double step_alpha[], double CR_used[])
{
    randint_t chains[2*MAX_CHAINS];
    double u = randu();
    int alg = (u < snooker_rate ? _SNOOKER : u < de_rate ? _DE : _DIRECT);
    double *xin = &pop[qq*Nvar];
    int k;
    switch (alg) {
    case _DE: // Use DE with cross-over ratio
    {
        int var, num_crossover, active;
        double crossover_ratio, CR_cdf, distance, jiggle;
        // Select to number of vector pair differences to use in update
        // using k ~ discrete U[1, max pairs]
        int num_pairs = randint(max_pairs)+1;
        // [PAK: same as k = DEversion[qq, 1] in matlab version]
        // Weight the size of the jump inversely proportional to the
        // number of contributions, both from the parameters being
        // updated and from the population defining the step direction.
        double gamma_scale = 2.38/sqrt(2 * Nvar * num_pairs);
        // [PAK: same as F=Table_JumpRate[len(vars), k] in matlab version]
        // Select 2*k members at random different from the current member
        rand_draw(2*num_pairs, Nchain, qq, chains);
        // Select crossover ratio by walking the discrete CDF in CR[][1]
        u = randu();
        CR_cdf = 0.;
        for (k=0; k < NCR-1; k++) {
            CR_cdf += CR[k][1];
            if (u <= CR_cdf) break;
        }
        crossover_ratio = CR[k][0];
        CR_used[qq] = crossover_ratio;
        // Select the dims to update based on the crossover ratio, making
        // sure at least one dim is selected (the extra var == Nvar pass
        // forces one random dimension when none was chosen).
        num_crossover = 0;
        for (var=0; var < Nvar || num_crossover == 0; var++) {
            if (var == Nvar) {
                active = randint(Nvar);
            } else if (randu() <= crossover_ratio) {
                active = var;
            } else {
                x_new[var] = 0.;
                continue;
            }
            num_crossover++;
            // Find and average step from the selected pairs
            distance = 0.;
            for (k=0; k < num_pairs; k++) {
                distance += pop[chains[2*k]*Nvar + active] - pop[chains[2*k+1]*Nvar + active];
            }
            // Apply that step with F scaling and noise
            jiggle = 1 + eps * (2 * randu() - 1);
            x_new[active] = jiggle*gamma_scale*distance;
        }
        step_alpha[qq] = 1.;
        break;
    }
    case _SNOOKER: // Use snooker update
    {
        double num, denom, gamma_scale;
        // Select current and three others
        rand_draw(3, Nchain, qq, chains);
        double *z = &pop[chains[0]*Nvar];
        double *R1 = &pop[chains[1]*Nvar];
        double *R2 = &pop[chains[2]*Nvar];
        // Find the step direction and scale it to the length of the
        // projection of R1-R2 onto the step direction.
        // TODO: population sometimes not unique!
        for (k=0; k < Nvar; k++) x_new[k] = xin[k] - z[k];
        // Guard against a zero-length direction (duplicate population
        // members): jitter with tiny normals until the norm is nonzero.
        while (1) {
            denom = 0.; for (k=0; k < Nvar; k++) denom += x_new[k]*x_new[k];
            if (denom != 0.) break;
            for (k=0; k < Nvar; k++) x_new[k] = EPS*randn();
        }
        num = 0.; for (k=0; k < Nvar; k++) num += ((R1[k]-R2[k])*x_new[k]);
        // Step using gamma of 2.38/sqrt(2) + U(-0.5, 0.5)
        gamma_scale = (1.2 + randu())*num/denom;
        for (k=0; k < Nvar; k++) x_new[k] *= gamma_scale;
        // Scale Metropolis probability by (||xi* - z||/||xi - z||)^(d-1).
        // num/denom below is the *squared* norm ratio, so the exponent is
        // (d-1)/2.
        num = 0.;
        for (k=0; k < Nvar; k++)
            num += (xin[k]+x_new[k]-z[k])*(xin[k]+x_new[k]-z[k]);
        // FIX: (Nvar-1)/2 was integer division, silently flooring the
        // exponent whenever Nvar is even; compute it in floating point.
        step_alpha[qq] = pow(num/denom, 0.5*(Nvar-1));
        CR_used[qq] = 0.;
        break;
    }
    case _DIRECT: // Use one pair and all dimensions
    {
        // Note that there is no F scaling, dimension selection or noise.
        // (Removed an unused local `int p[2]` left over from an earlier
        // version.)
        rand_draw(2, Nchain, qq, chains);
        double *R1 = &pop[chains[0]*Nvar];
        double *R2 = &pop[chains[1]*Nvar];
        for (k=0; k < Nvar; k++) x_new[k] = R1[k] - R2[k];
        step_alpha[qq] = 1.;
        CR_used[qq] = 0.;
        break;
    }
    }
    // Update x_old with delta_x and noise
    for (k=0; k < Nvar; k++) x_new[k] *= scale;
    // [PAK] The noise term needs to depend on the fitting range
    // of the parameter rather than using a fixed noise value for all
    // parameters. The current parameter value is a pretty good proxy
    // in most cases (i.e., relative noise), but it breaks down if the
    // parameter is zero, or if the range is something like 1 +/- eps.
    // relative noise
    for (k=0; k < Nvar; k++) x_new[k] += xin[k]*(1.+scale*noise*randn());
}
// Generate one proposed move for every chain in the population.
// de_rate partitions the unit interval used in _perform_step:
// [0, snooker_rate) -> snooker, [snooker_rate, de_rate) -> DE with crossover,
// [de_rate, 1) -> direct DE, i.e. an 80:20 split of the non-snooker mass.
void
de_step(int Nchain, int Nvar, int NCR,
double pop[], double CR[][2],
int max_pairs, double eps,
double snooker_rate, double noise, double scale,
double x_new[], double step_alpha[], double CR_used[])
{
int qq;
double de_rate = snooker_rate + 0.8 * (1-snooker_rate);
//Choose snooker, de or direct according to snooker_rate, and 80:20
// ratio of de to direct.
// Chains evolve using information from other chains to create offspring
// (each chain writes only its own slice of x_new/step_alpha/CR_used, so
// the loop parallelizes cleanly).
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (qq = 0; qq < Nchain; qq++) {
_perform_step(qq, Nchain, Nvar, NCR, pop, CR,
max_pairs, eps, snooker_rate, de_rate,
noise, scale, &x_new[qq*Nvar], step_alpha, CR_used);
}
}
// Map out-of-bounds parameters back inside [low, high] by reflecting across
// the violated boundary; points still outside after one reflection (overshoot
// beyond one box width) are resampled uniformly within the bounds.
void bounds_reflect(int Nchain, int Nvar, double pop[], double low[], double high[])
{
    int k, p, idx;
#ifdef _OPENMP
    // FIX: only `idx` was private; the inner loop counter `k` was shared
    // across threads, so concurrent chains raced on it. Both helpers must be
    // thread-private.
    #pragma omp parallel for private(k, idx)
#endif
    for (p=0; p < Nchain; p++) {
        for (k=0; k < Nvar; k++) {
            idx = p*Nvar+k;
            if (pop[idx] < low[k]) {
                pop[idx] = 2*low[k] - pop[idx];
            } else if (pop[idx] > high[k]) {
                pop[idx] = 2*high[k] - pop[idx];
            }
            // Reflection can itself overshoot the opposite edge; fall back to
            // a uniform redraw inside the box.
            if (pop[idx] < low[k] || pop[idx] > high[k]) {
                pop[idx] = low[k] + randu()*(high[k]-low[k]);
            }
        }
    }
}
// Clamp every parameter of every chain to its [low, high] interval.
void bounds_clip(int Nchain, int Nvar, double pop[], double low[], double high[])
{
    int k, p, idx;
#ifdef _OPENMP
    // FIX: only `idx` was private; the inner loop counter `k` was shared
    // across threads, so concurrent chains raced on it. Both helpers must be
    // thread-private.
    #pragma omp parallel for private(k, idx)
#endif
    for (p=0; p < Nchain; p++) {
        for (k=0; k < Nvar; k++) {
            idx = p*Nvar+k;
            if (pop[idx] < low[k]) {
                pop[idx] = low[k];
            } else if (pop[idx] > high[k]) {
                pop[idx] = high[k];
            }
        }
    }
}
// Fold out-of-bounds parameters through the opposite boundary (periodic
// wrap); when the far side is unbounded (infinite), reflect instead. Values
// still outside afterwards (overshoot beyond one box width) are resampled
// uniformly within the bounds.
void bounds_fold(int Nchain, int Nvar, double pop[], double low[], double high[])
{
    int k, p, idx;
#ifdef _OPENMP
    // FIX: only `idx` was private; the inner loop counter `k` was shared
    // across threads, so concurrent chains raced on it. Both helpers must be
    // thread-private.
    #pragma omp parallel for private(k, idx)
#endif
    for (p=0; p < Nchain; p++) {
        for (k=0; k < Nvar; k++) {
            idx = p*Nvar+k;
            if (pop[idx] < low[k]) {
                if (isinf(high[k])) {
                    pop[idx] = 2*low[k] - pop[idx];            // no far edge: reflect
                } else {
                    pop[idx] = high[k] - (low[k] - pop[idx]);  // wrap around the top
                }
            } else if (pop[idx] > high[k]) {
                if (isinf(low[k])) {
                    pop[idx] = 2*high[k] - pop[idx];
                } else {
                    pop[idx] = low[k] - (high[k] - pop[idx]);  // wrap around the bottom
                }
            }
            // Folding can overshoot the opposite edge; fall back to a uniform
            // redraw inside the box.
            if (pop[idx] < low[k] || pop[idx] > high[k]) {
                pop[idx] = low[k] + randu()*(high[k]-low[k]);
            }
        }
    }
}
// Replace out-of-bounds parameters with a uniform redraw inside [low, high];
// when the interval is half-open (one side infinite) reflect across the
// finite boundary instead, since a uniform draw is undefined there.
void bounds_random(int Nchain, int Nvar, double pop[], double low[], double high[])
{
    int k, p, idx;
#ifdef _OPENMP
    // FIX: only `idx` was private; the inner loop counter `k` was shared
    // across threads, so concurrent chains raced on it. Both helpers must be
    // thread-private.
    #pragma omp parallel for private(k, idx)
#endif
    for (p=0; p < Nchain; p++) {
        for (k=0; k < Nvar; k++) {
            idx = p*Nvar+k;
            if (pop[idx] < low[k]) {
                if (isinf(high[k])) {
                    pop[idx] = 2*low[k] - pop[idx];
                } else {
                    pop[idx] = low[k] + randu()*(high[k]-low[k]);
                }
            } else if (pop[idx] > high[k]) {
                if (isinf(low[k])) {
                    pop[idx] = 2*high[k] - pop[idx];
                } else {
                    pop[idx] = low[k] + randu()*(high[k]-low[k]);
                }
            }
        }
    }
}
// Boundary policy that deliberately leaves out-of-bounds values untouched.
// The parameters are intentionally unused: the signature must match the
// other bounds_* handlers so callers can swap policies uniformly.
void bounds_ignore(int Nchain, int Nvar, double pop[], double low[], double high[])
{
}
|
NAS_BT.c | //---------------------------------------------------------------------
// program BT
//---------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#if !defined(CLASS_W) && !defined(CLASS_S) && !defined(CLASS_A) && !defined(CLASS_B) && !defined(CLASS_C) && !defined(CLASS_D) && !defined(CLASS_E)
# define CLASS_W
#endif
//----------
// Class S:
//----------
#ifdef CLASS_S
# define PROBLEM_SIZE 12
# define NITER_DEFAULT 60
# define DT_DEFAULT 0.010
#endif
//----------
// Class W:
//----------
#ifdef CLASS_W
# define PROBLEM_SIZE 24
# define NITER_DEFAULT 200
# define DT_DEFAULT 0.0008
#endif
//----------
// Class A:
//----------
#ifdef CLASS_A
# define PROBLEM_SIZE 64
# define NITER_DEFAULT 200
# define DT_DEFAULT 0.0008
#endif
//----------
// Class B:
//----------
#ifdef CLASS_B
# define PROBLEM_SIZE 102
# define NITER_DEFAULT 200
# define DT_DEFAULT 0.0003
#endif
//----------
// Class C:
//----------
#ifdef CLASS_C
# define PROBLEM_SIZE 162
# define NITER_DEFAULT 200
# define DT_DEFAULT 0.0001
#endif
//----------
// Class D:
//----------
#ifdef CLASS_D
# define PROBLEM_SIZE 408
# define NITER_DEFAULT 250
# define DT_DEFAULT 0.00002
#endif
//----------
// Class E:
//----------
#ifdef CLASS_E
# define PROBLEM_SIZE 1020
# define NITER_DEFAULT 250
# define DT_DEFAULT 0.4e-5
#endif
#define AA 0
#define BB 1
#define CC 2
#define BLOCK_SIZE 5
#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE
#define IMAXP IMAX/2*2
#define JMAXP JMAX/2*2
// Complex number as a plain real/imaginary pair (NPB convention).
// NOTE(review): not referenced anywhere in the visible part of this
// translation unit — confirm it is used further down before removing.
typedef struct
{
double real;
double imag;
} dcomplex;
#define min(x,y) ((x) < (y) ? (x) : (y))
#define max(x,y) ((x) > (y) ? (x) : (y))
/* common /global/ */
int grid_points[3];
/* common /constants/ */
double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3,
dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4,
dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt,
ce[5][13], dxmax, dymax, dzmax, xxcon1, xxcon2,
xxcon3, xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1,
dx4tx1, dx5tx1, yycon1, yycon2, yycon3, yycon4,
yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1,
zzcon1, zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1,
dz2tz1, dz3tz1, dz4tz1, dz5tz1, dnxm1, dnym1,
dnzm1, c1c2, c1c5, c3c4, c1345, conz1, c1, c2,
c3, c4, c5, c4dssp, c5dssp, dtdssp, dttx1,
dttx2, dtty1, dtty2, dttz1, dttz2, c2dttx1,
c2dtty1, c2dttz1, comz1, comz4, comz5, comz6,
c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;
// to improve cache performance, grid dimensions padded by 1
// for even number sizes only.
/* common /fields/ */
double us [KMAX][JMAXP + 1][IMAXP + 1];
double vs [KMAX][JMAXP + 1][IMAXP + 1];
double ws [KMAX][JMAXP + 1][IMAXP + 1];
double qs [KMAX][JMAXP + 1][IMAXP + 1];
double rho_i [KMAX][JMAXP + 1][IMAXP + 1];
double square [KMAX][JMAXP + 1][IMAXP + 1];
double forcing[KMAX][JMAXP + 1][IMAXP + 1][5];
double u [KMAX][JMAXP + 1][IMAXP + 1][5];
double rhs [KMAX][JMAXP + 1][IMAXP + 1][5];
//-----------------------------------------------------------------------
// Timer constants
//-----------------------------------------------------------------------
#define t_total 1
#define t_rhsx 2
#define t_rhsy 3
#define t_rhsz 4
#define t_rhs 5
#define t_xsolve 6
#define t_ysolve 7
#define t_zsolve 8
#define t_rdis1 9
#define t_rdis2 10
#define t_add 11
#define t_last 11
void initialize();
void lhsinit(double lhs[][3][5][5], int size);
void exact_solution(double xi, double eta, double zeta, double dtemp[5]);
void exact_rhs();
void set_constants();
void adi();
void compute_rhs();
void x_solve();
void y_solve();
void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]);
void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
void binvrhs(double lhs[5][5], double r[5]);
void z_solve();
void add();
void error_norm(double rms[5]);
void rhs_norm(double rms[5]);
void verify(int no_time_steps, char *class, int *verified);
void print_results(char *name, char class, int n1, int n2, int n3, int niter,
double t, double mops, char *optype, int verified);
double start[64], elapsed[64];
double elapsed_time( void );
void timer_clear( int n );
void timer_start( int n );
void timer_stop( int n );
double timer_read( int n );
void wtime(double *t);
int main(int argc, char *argv[])
{
int i, niter, step;
double navg, mflops, n3;
double tmax, t, trecs[t_last + 1];
int verified;
char Class;
char *t_names[t_last + 1];
printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - BT Benchmark\n\n");
niter = NITER_DEFAULT;
dt = DT_DEFAULT;
grid_points[0] = PROBLEM_SIZE;
grid_points[1] = PROBLEM_SIZE;
grid_points[2] = PROBLEM_SIZE;
printf(" Size: %4dx%4dx%4d\n",
grid_points[0], grid_points[1], grid_points[2]);
printf(" Iterations: %4d dt: %10.6f\n", niter, dt);
printf("\n");
if ( (grid_points[0] > IMAX) ||
(grid_points[1] > JMAX) ||
(grid_points[2] > KMAX) )
{
printf(" %d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]);
printf(" Problem size too big for compiled array sizes\n");
return 0;
}
set_constants();
for (i = 1; i <= t_last; i++)
{
timer_clear(i);
}
initialize();
exact_rhs();
//---------------------------------------------------------------------
// do one time step to touch all code, and reinitialize
//---------------------------------------------------------------------
adi();
initialize();
for (i = 1; i <= t_last; i++)
{
timer_clear(i);
}
timer_start(1);
for (step = 1; step <= niter; step++)
{
if ((step % 20) == 0 || step == 1)
{
printf(" Time step %4d\n", step);
}
adi();
}
timer_stop(1);
tmax = timer_read(1);
verify(niter, &Class, &verified);
n3 = 1.0 * grid_points[0] * grid_points[1] * grid_points[2];
navg = (grid_points[0] + grid_points[1] + grid_points[2]) / 3.0;
if (tmax != 0.0)
{
mflops = 1.0e-6 * (double)niter *
(3478.8 * n3 - 17655.7 * (navg * navg) + 28023.7 * navg)
/ tmax;
}
else
{
mflops = 0.0;
}
print_results("BT", Class, grid_points[0],
grid_points[1], grid_points[2], niter,
tmax, mflops, " floating point",
verified);
int exitValue = verified ? 0 : 1;
return exitValue;
}
// One ADI (Alternating Direction Implicit) time step: build the right-hand
// side, solve the block-tridiagonal systems along x, y and z in turn, then
// add the resulting update to the solution vector u.
void adi()
{
compute_rhs();
x_solve();
y_solve();
z_solve();
add();
}
//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
// Add the computed update rhs to the solution vector u at all interior grid
// points (boundaries excluded). Parallel over k-planes: each thread writes a
// disjoint slice of u.
void add()
{
    int i, j, k, m;
    // FIX(perf): `grid_points` and `rhs` were firstprivate, so every thread
    // received its own copy of the entire rhs array even though it is only
    // read here; sharing the read-only data is behaviorally identical and
    // avoids the large per-thread copies.
    #pragma omp parallel for default(shared) private(k, j, i, m)
    for (k = 1; k <= grid_points[2] - 2; k++)
    {
        for (j = 1; j <= grid_points[1] - 2; j++)
        {
            for (i = 1; i <= grid_points[0] - 2; i++)
            {
                for (m = 0; m < 5; m++)
                {
                    u[k][j][i][m] = u[k][j][i][m] + rhs[k][j][i][m];
                }
            }
        }
    }
}
//---------------------------------------------------------------------
// this function computes the norm of the difference between the
// computed solution and the exact solution
//---------------------------------------------------------------------
// Compute, for each of the 5 solution components, the RMS difference between
// the computed solution u and the analytic exact solution over the whole
// grid, normalized by the interior point counts.
void error_norm(double rms[5])
{
int i, j, k, m, d;
double xi, eta, zeta, u_exact[5], add;
for (m = 0; m < 5; m++)
{
rms[m] = 0.0;
}
// NOTE(review): firstprivate(u, ce) copies these large read-only arrays into
// every thread; `shared` would avoid the copies — confirm against the tool
// that generated this pragma. The array-section reduction requires
// OpenMP >= 4.5.
#pragma omp parallel for default(shared) private(k, j, i, m, zeta, eta, xi, add) firstprivate(dnzm1, dnym1, dnxm1, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
for (k = 0; k <= grid_points[2] - 1; k++)
{
zeta = (double)(k) * dnzm1;
for (j = 0; j <= grid_points[1] - 1; j++)
{
eta = (double)(j) * dnym1;
for (i = 0; i <= grid_points[0] - 1; i++)
{
xi = (double)(i) * dnxm1;
// Accumulate the squared pointwise error per component.
exact_solution(xi, eta, zeta, u_exact);
for (m = 0; m < 5; m++)
{
add = u[k][j][i][m] - u_exact[m];
rms[m] = rms[m] + add * add;
}
}
}
}
// Normalize by the interior extent of each dimension, then take the root.
for (m = 0; m < 5; m++)
{
for (d = 0; d < 3; d++)
{
rms[m] = rms[m] / (double)(grid_points[d] - 2);
}
rms[m] = sqrt(rms[m]);
}
}
// Compute, for each of the 5 components, the RMS of the right-hand-side
// vector over the interior grid points, normalized like error_norm().
void rhs_norm(double rms[5])
{
int i, j, k, d, m;
double add;
for (m = 0; m < 5; m++)
{
rms[m] = 0.0;
}
// NOTE(review): firstprivate(rhs) copies the whole read-only rhs array per
// thread; `shared` would be equivalent and cheaper — confirm against the
// tool that generated this pragma.
#pragma omp parallel for default(shared) private(k, j, i, m, add) firstprivate(grid_points, rhs) reduction(+ : rms[:5])
for (k = 1; k <= grid_points[2] - 2; k++)
{
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
add = rhs[k][j][i][m];
rms[m] = rms[m] + add * add;
}
}
}
}
// Normalize by the interior extent of each dimension, then take the root.
for (m = 0; m < 5; m++)
{
for (d = 0; d < 3; d++)
{
rms[m] = rms[m] / (double)(grid_points[d] - 2);
}
rms[m] = sqrt(rms[m]);
}
}
//---------------------------------------------------------------------
// compute the right hand side based on exact solution
//
// Evaluates the analytic solution along one-dimensional pencils in
// each coordinate direction, applies the same flux-difference and
// fourth-order dissipation stencils used by compute_rhs, and finally
// negates the result so it can serve as the forcing term.
//---------------------------------------------------------------------
void exact_rhs()
{
  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;
  // per-pencil scratch arrays: exact solution values (ue), values
  // scaled by 1/density (buf), a squared-velocity term (cuf) and the
  // dynamic-pressure term (q).
  // NOTE(review): these appear in firstprivate below purely to give
  // each thread its own copy; every element is written before it is
  // read, so no initial values are actually transferred.
  double cuf[PROBLEM_SIZE + 1];
  double q [PROBLEM_SIZE + 1];
  double ue [PROBLEM_SIZE + 1][5];
  double buf[PROBLEM_SIZE + 1][5];
  //---------------------------------------------------------------------
  // initialize
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
  for (k = 0; k <= grid_points[2] - 1; k++)
  {
    for (j = 0; j <= grid_points[1] - 1; j++)
    {
      for (i = 0; i <= grid_points[0] - 1; i++)
      {
        for (m = 0; m < 5; m++)
        {
          forcing[k][j][i][m] = 0.0;
        }
      }
    }
  }
  //---------------------------------------------------------------------
  // xi-direction flux differences
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(k, j, i, m, zeta, eta, xi, dtpp, im1, ip1) firstprivate(dnzm1, dnym1, dnxm1, tx2, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
  for (k = 1; k <= grid_points[2] - 2; k++)
  {
    zeta = (double)(k) * dnzm1;
    for (j = 1; j <= grid_points[1] - 2; j++)
    {
      eta = (double)(j) * dnym1;
      // first pass: fill the pencil scratch arrays along i
      for (i = 0; i <= grid_points[0] - 1; i++)
      {
        xi = (double)(i) * dnxm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++)
        {
          ue[i][m] = dtemp[m];
        }
        dtpp = 1.0 / dtemp[0];
        for (m = 1; m < 5; m++)
        {
          buf[i][m] = dtpp * dtemp[m];
        }
        cuf[i] = buf[i][1] * buf[i][1];
        buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
        q[i] = 0.5 * (buf[i][1] * ue[i][1] + buf[i][2] * ue[i][2] +
                      buf[i][3] * ue[i][3]);
      }
      // second pass: central flux differences over the interior points
      for (i = 1; i <= grid_points[0] - 2; i++)
      {
        im1 = i - 1;
        ip1 = i + 1;
        forcing[k][j][i][0] = forcing[k][j][i][0] -
                              tx2 * ( ue[ip1][1] - ue[im1][1] ) +
                              dx1tx1 * (ue[ip1][0] - 2.0 * ue[i][0] + ue[im1][0]);
        forcing[k][j][i][1] = forcing[k][j][i][1] - tx2 * (
                                (ue[ip1][1] * buf[ip1][1] + c2 * (ue[ip1][4] - q[ip1])) -
                                (ue[im1][1] * buf[im1][1] + c2 * (ue[im1][4] - q[im1]))) +
                              xxcon1 * (buf[ip1][1] - 2.0 * buf[i][1] + buf[im1][1]) +
                              dx2tx1 * ( ue[ip1][1] - 2.0 * ue[i][1] + ue[im1][1]);
        forcing[k][j][i][2] = forcing[k][j][i][2] - tx2 * (
                                ue[ip1][2] * buf[ip1][1] - ue[im1][2] * buf[im1][1]) +
                              xxcon2 * (buf[ip1][2] - 2.0 * buf[i][2] + buf[im1][2]) +
                              dx3tx1 * ( ue[ip1][2] - 2.0 * ue[i][2] + ue[im1][2]);
        forcing[k][j][i][3] = forcing[k][j][i][3] - tx2 * (
                                ue[ip1][3] * buf[ip1][1] - ue[im1][3] * buf[im1][1]) +
                              xxcon2 * (buf[ip1][3] - 2.0 * buf[i][3] + buf[im1][3]) +
                              dx4tx1 * ( ue[ip1][3] - 2.0 * ue[i][3] + ue[im1][3]);
        forcing[k][j][i][4] = forcing[k][j][i][4] - tx2 * (
                                buf[ip1][1] * (c1 * ue[ip1][4] - c2 * q[ip1]) -
                                buf[im1][1] * (c1 * ue[im1][4] - c2 * q[im1])) +
                              0.5 * xxcon3 * (buf[ip1][0] - 2.0 * buf[i][0] +
                                              buf[im1][0]) +
                              xxcon4 * (cuf[ip1] - 2.0 * cuf[i] + cuf[im1]) +
                              xxcon5 * (buf[ip1][4] - 2.0 * buf[i][4] + buf[im1][4]) +
                              dx5tx1 * ( ue[ip1][4] - 2.0 * ue[i][4] + ue[im1][4]);
      }
      //---------------------------------------------------------------------
      // Fourth-order dissipation
      // (one-sided stencils at the two points nearest each boundary,
      // the full five-point stencil in between)
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++)
      {
        i = 1;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                              (5.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
        i = 2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                              (-4.0 * ue[i - 1][m] + 6.0 * ue[i][m] -
                               4.0 * ue[i + 1][m] + ue[i + 2][m]);
      }
      for (i = 3; i <= grid_points[0] - 4; i++)
      {
        for (m = 0; m < 5; m++)
        {
          forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                                (ue[i - 2][m] - 4.0 * ue[i - 1][m] +
                                 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
        }
      }
      for (m = 0; m < 5; m++)
      {
        i = grid_points[0] - 3;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                              (ue[i - 2][m] - 4.0 * ue[i - 1][m] +
                               6.0 * ue[i][m] - 4.0 * ue[i + 1][m]);
        i = grid_points[0] - 2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                              (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 5.0 * ue[i][m]);
      }
    }
  }
  //---------------------------------------------------------------------
  // eta-direction flux differences
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(k, i, j, m, zeta, xi, eta, dtpp, jm1, jp1) firstprivate(dnzm1, dnxm1, dnym1, ty2, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
  for (k = 1; k <= grid_points[2] - 2; k++)
  {
    zeta = (double)(k) * dnzm1;
    for (i = 1; i <= grid_points[0] - 2; i++)
    {
      xi = (double)(i) * dnxm1;
      // first pass: fill the pencil scratch arrays along j
      for (j = 0; j <= grid_points[1] - 1; j++)
      {
        eta = (double)(j) * dnym1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++)
        {
          ue[j][m] = dtemp[m];
        }
        dtpp = 1.0 / dtemp[0];
        for (m = 1; m < 5; m++)
        {
          buf[j][m] = dtpp * dtemp[m];
        }
        cuf[j] = buf[j][2] * buf[j][2];
        buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
        q[j] = 0.5 * (buf[j][1] * ue[j][1] + buf[j][2] * ue[j][2] +
                      buf[j][3] * ue[j][3]);
      }
      // second pass: central flux differences over the interior points
      for (j = 1; j <= grid_points[1] - 2; j++)
      {
        jm1 = j - 1;
        jp1 = j + 1;
        forcing[k][j][i][0] = forcing[k][j][i][0] -
                              ty2 * ( ue[jp1][2] - ue[jm1][2] ) +
                              dy1ty1 * (ue[jp1][0] - 2.0 * ue[j][0] + ue[jm1][0]);
        forcing[k][j][i][1] = forcing[k][j][i][1] - ty2 * (
                                ue[jp1][1] * buf[jp1][2] - ue[jm1][1] * buf[jm1][2]) +
                              yycon2 * (buf[jp1][1] - 2.0 * buf[j][1] + buf[jm1][1]) +
                              dy2ty1 * ( ue[jp1][1] - 2.0 * ue[j][1] + ue[jm1][1]);
        forcing[k][j][i][2] = forcing[k][j][i][2] - ty2 * (
                                (ue[jp1][2] * buf[jp1][2] + c2 * (ue[jp1][4] - q[jp1])) -
                                (ue[jm1][2] * buf[jm1][2] + c2 * (ue[jm1][4] - q[jm1]))) +
                              yycon1 * (buf[jp1][2] - 2.0 * buf[j][2] + buf[jm1][2]) +
                              dy3ty1 * ( ue[jp1][2] - 2.0 * ue[j][2] + ue[jm1][2]);
        forcing[k][j][i][3] = forcing[k][j][i][3] - ty2 * (
                                ue[jp1][3] * buf[jp1][2] - ue[jm1][3] * buf[jm1][2]) +
                              yycon2 * (buf[jp1][3] - 2.0 * buf[j][3] + buf[jm1][3]) +
                              dy4ty1 * ( ue[jp1][3] - 2.0 * ue[j][3] + ue[jm1][3]);
        forcing[k][j][i][4] = forcing[k][j][i][4] - ty2 * (
                                buf[jp1][2] * (c1 * ue[jp1][4] - c2 * q[jp1]) -
                                buf[jm1][2] * (c1 * ue[jm1][4] - c2 * q[jm1])) +
                              0.5 * yycon3 * (buf[jp1][0] - 2.0 * buf[j][0] +
                                              buf[jm1][0]) +
                              yycon4 * (cuf[jp1] - 2.0 * cuf[j] + cuf[jm1]) +
                              yycon5 * (buf[jp1][4] - 2.0 * buf[j][4] + buf[jm1][4]) +
                              dy5ty1 * (ue[jp1][4] - 2.0 * ue[j][4] + ue[jm1][4]);
      }
      //---------------------------------------------------------------------
      // Fourth-order dissipation
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++)
      {
        j = 1;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                              (5.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
        j = 2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                              (-4.0 * ue[j - 1][m] + 6.0 * ue[j][m] -
                               4.0 * ue[j + 1][m] + ue[j + 2][m]);
      }
      for (j = 3; j <= grid_points[1] - 4; j++)
      {
        for (m = 0; m < 5; m++)
        {
          forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                                (ue[j - 2][m] - 4.0 * ue[j - 1][m] +
                                 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
        }
      }
      for (m = 0; m < 5; m++)
      {
        j = grid_points[1] - 3;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                              (ue[j - 2][m] - 4.0 * ue[j - 1][m] +
                               6.0 * ue[j][m] - 4.0 * ue[j + 1][m]);
        j = grid_points[1] - 2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                              (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 5.0 * ue[j][m]);
      }
    }
  }
  //---------------------------------------------------------------------
  // zeta-direction flux differences
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(j, i, k, m, eta, xi, zeta, dtpp, km1, kp1) firstprivate(dnym1, dnxm1, dnzm1, tz2, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
  for (j = 1; j <= grid_points[1] - 2; j++)
  {
    eta = (double)(j) * dnym1;
    for (i = 1; i <= grid_points[0] - 2; i++)
    {
      xi = (double)(i) * dnxm1;
      // first pass: fill the pencil scratch arrays along k
      for (k = 0; k <= grid_points[2] - 1; k++)
      {
        zeta = (double)(k) * dnzm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++)
        {
          ue[k][m] = dtemp[m];
        }
        dtpp = 1.0 / dtemp[0];
        for (m = 1; m < 5; m++)
        {
          buf[k][m] = dtpp * dtemp[m];
        }
        cuf[k] = buf[k][3] * buf[k][3];
        buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
        q[k] = 0.5 * (buf[k][1] * ue[k][1] + buf[k][2] * ue[k][2] +
                      buf[k][3] * ue[k][3]);
      }
      // second pass: central flux differences over the interior points
      for (k = 1; k <= grid_points[2] - 2; k++)
      {
        km1 = k - 1;
        kp1 = k + 1;
        forcing[k][j][i][0] = forcing[k][j][i][0] -
                              tz2 * ( ue[kp1][3] - ue[km1][3] ) +
                              dz1tz1 * (ue[kp1][0] - 2.0 * ue[k][0] + ue[km1][0]);
        forcing[k][j][i][1] = forcing[k][j][i][1] - tz2 * (
                                ue[kp1][1] * buf[kp1][3] - ue[km1][1] * buf[km1][3]) +
                              zzcon2 * (buf[kp1][1] - 2.0 * buf[k][1] + buf[km1][1]) +
                              dz2tz1 * ( ue[kp1][1] - 2.0 * ue[k][1] + ue[km1][1]);
        forcing[k][j][i][2] = forcing[k][j][i][2] - tz2 * (
                                ue[kp1][2] * buf[kp1][3] - ue[km1][2] * buf[km1][3]) +
                              zzcon2 * (buf[kp1][2] - 2.0 * buf[k][2] + buf[km1][2]) +
                              dz3tz1 * (ue[kp1][2] - 2.0 * ue[k][2] + ue[km1][2]);
        forcing[k][j][i][3] = forcing[k][j][i][3] - tz2 * (
                                (ue[kp1][3] * buf[kp1][3] + c2 * (ue[kp1][4] - q[kp1])) -
                                (ue[km1][3] * buf[km1][3] + c2 * (ue[km1][4] - q[km1]))) +
                              zzcon1 * (buf[kp1][3] - 2.0 * buf[k][3] + buf[km1][3]) +
                              dz4tz1 * ( ue[kp1][3] - 2.0 * ue[k][3] + ue[km1][3]);
        forcing[k][j][i][4] = forcing[k][j][i][4] - tz2 * (
                                buf[kp1][3] * (c1 * ue[kp1][4] - c2 * q[kp1]) -
                                buf[km1][3] * (c1 * ue[km1][4] - c2 * q[km1])) +
                              0.5 * zzcon3 * (buf[kp1][0] - 2.0 * buf[k][0]
                                              + buf[km1][0]) +
                              zzcon4 * (cuf[kp1] - 2.0 * cuf[k] + cuf[km1]) +
                              zzcon5 * (buf[kp1][4] - 2.0 * buf[k][4] + buf[km1][4]) +
                              dz5tz1 * ( ue[kp1][4] - 2.0 * ue[k][4] + ue[km1][4]);
      }
      //---------------------------------------------------------------------
      // Fourth-order dissipation
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++)
      {
        k = 1;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                              (5.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
        k = 2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                              (-4.0 * ue[k - 1][m] + 6.0 * ue[k][m] -
                               4.0 * ue[k + 1][m] + ue[k + 2][m]);
      }
      for (k = 3; k <= grid_points[2] - 4; k++)
      {
        for (m = 0; m < 5; m++)
        {
          forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                                (ue[k - 2][m] - 4.0 * ue[k - 1][m] +
                                 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
        }
      }
      for (m = 0; m < 5; m++)
      {
        k = grid_points[2] - 3;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                              (ue[k - 2][m] - 4.0 * ue[k - 1][m] +
                               6.0 * ue[k][m] - 4.0 * ue[k + 1][m]);
        k = grid_points[2] - 2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
                              (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 5.0 * ue[k][m]);
      }
    }
  }
  //---------------------------------------------------------------------
  // now change the sign of the forcing function,
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
  for (k = 1; k <= grid_points[2] - 2; k++)
  {
    for (j = 1; j <= grid_points[1] - 2; j++)
    {
      for (i = 1; i <= grid_points[0] - 2; i++)
      {
        for (m = 0; m < 5; m++)
        {
          forcing[k][j][i][m] = -1.0 * forcing[k][j][i][m];
        }
      }
    }
  }
}
//---------------------------------------------------------------------
// this function returns the exact solution at point xi, eta, zeta
//---------------------------------------------------------------------
void exact_solution(double xi, double eta, double zeta, double dtemp[5])
{
int m;
for (m = 0; m < 5; m++)
{
dtemp[m] = ce[m][0] +
xi * (ce[m][1] + xi * (ce[m][4] + xi * (ce[m][7] + xi * ce[m][10]))) +
eta * (ce[m][2] + eta * (ce[m][5] + eta * (ce[m][8] + eta * ce[m][11]))) +
zeta * (ce[m][3] + zeta * (ce[m][6] + zeta * (ce[m][9] +
zeta * ce[m][12])));
}
}
//---------------------------------------------------------------------
// This subroutine initializes the field variable u using
// tri-linear transfinite interpolation of the boundary values
//---------------------------------------------------------------------
void initialize()
{
  int i, j, k, m, ix, iy, iz;
  // Pface[s][d][m]: exact solution on face s (0/1) of direction d
  // (0=xi, 1=eta, 2=zeta) for component m; temp is per-point scratch.
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];
  //---------------------------------------------------------------------
  // Later (in compute_rhs) we compute 1/u for every element. A few of
  // the corner elements are not used, but it convenient (and faster)
  // to compute the whole thing with a simple loop. Make sure those
  // values are nonzero by initializing the whole thing here.
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
  for (k = 0; k <= grid_points[2] - 1; k++)
  {
    for (j = 0; j <= grid_points[1] - 1; j++)
    {
      for (i = 0; i <= grid_points[0] - 1; i++)
      {
        for (m = 0; m < 5; m++)
        {
          u[k][j][i][m] = 1.0;
        }
      }
    }
  }
  //---------------------------------------------------------------------
  // first store the "interpolated" values everywhere on the grid
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(k, j, i, ix, iy, iz, m, zeta, eta, xi, Pxi, Peta, Pzeta) firstprivate(dnzm1, dnym1, dnxm1, grid_points, ce, Pface)
  for (k = 0; k <= grid_points[2] - 1; k++)
  {
    zeta = (double)(k) * dnzm1;
    for (j = 0; j <= grid_points[1] - 1; j++)
    {
      eta = (double)(j) * dnym1;
      for (i = 0; i <= grid_points[0] - 1; i++)
      {
        xi = (double)(i) * dnxm1;
        // exact solution on the two opposite faces of each direction
        for (ix = 0; ix < 2; ix++)
        {
          exact_solution((double)ix, eta, zeta, Pface[ix][0]);
        }
        for (iy = 0; iy < 2; iy++)
        {
          exact_solution(xi, (double)iy , zeta, Pface[iy][1]);
        }
        for (iz = 0; iz < 2; iz++)
        {
          exact_solution(xi, eta, (double)iz, Pface[iz][2]);
        }
        // tri-linear transfinite interpolation blend of the six faces
        for (m = 0; m < 5; m++)
        {
          Pxi = xi * Pface[1][0][m] + (1.0 - xi) * Pface[0][0][m];
          Peta = eta * Pface[1][1][m] + (1.0 - eta) * Pface[0][1][m];
          Pzeta = zeta * Pface[1][2][m] + (1.0 - zeta) * Pface[0][2][m];
          u[k][j][i][m] = Pxi + Peta + Pzeta -
                          Pxi * Peta - Pxi * Pzeta - Peta * Pzeta +
                          Pxi * Peta * Pzeta;
        }
      }
    }
  }
  //---------------------------------------------------------------------
  // now store the exact values on the boundaries
  //---------------------------------------------------------------------
  //---------------------------------------------------------------------
  // west face
  //---------------------------------------------------------------------
  i = 0;
  xi = 0.0;
  #pragma omp parallel for default(shared) private(k, j, m, zeta, eta) firstprivate(dnzm1, dnym1, xi, i, grid_points, ce, temp)
  for (k = 0; k <= grid_points[2] - 1; k++)
  {
    zeta = (double)(k) * dnzm1;
    for (j = 0; j <= grid_points[1] - 1; j++)
    {
      eta = (double)(j) * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++)
      {
        u[k][j][i][m] = temp[m];
      }
    }
  }
  //---------------------------------------------------------------------
  // east face
  //---------------------------------------------------------------------
  i = grid_points[0] - 1;
  xi = 1.0;
  #pragma omp parallel for default(shared) private(k, j, m, zeta, eta) firstprivate(dnzm1, dnym1, xi, i, grid_points, ce, temp)
  for (k = 0; k <= grid_points[2] - 1; k++)
  {
    zeta = (double)(k) * dnzm1;
    for (j = 0; j <= grid_points[1] - 1; j++)
    {
      eta = (double)(j) * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++)
      {
        u[k][j][i][m] = temp[m];
      }
    }
  }
  //---------------------------------------------------------------------
  // south face
  //---------------------------------------------------------------------
  j = 0;
  eta = 0.0;
  #pragma omp parallel for default(shared) private(k, i, m, zeta, xi) firstprivate(dnzm1, dnxm1, eta, j, grid_points, ce, temp)
  for (k = 0; k <= grid_points[2] - 1; k++)
  {
    zeta = (double)(k) * dnzm1;
    for (i = 0; i <= grid_points[0] - 1; i++)
    {
      xi = (double)(i) * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++)
      {
        u[k][j][i][m] = temp[m];
      }
    }
  }
  //---------------------------------------------------------------------
  // north face
  //---------------------------------------------------------------------
  j = grid_points[1] - 1;
  eta = 1.0;
  #pragma omp parallel for default(shared) private(k, i, m, zeta, xi) firstprivate(dnzm1, dnxm1, eta, j, grid_points, ce, temp)
  for (k = 0; k <= grid_points[2] - 1; k++)
  {
    zeta = (double)(k) * dnzm1;
    for (i = 0; i <= grid_points[0] - 1; i++)
    {
      xi = (double)(i) * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++)
      {
        u[k][j][i][m] = temp[m];
      }
    }
  }
  //---------------------------------------------------------------------
  // bottom face
  //---------------------------------------------------------------------
  k = 0;
  zeta = 0.0;
  #pragma omp parallel for default(shared) private(j, i, m, eta, xi) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, temp)
  for (j = 0; j <= grid_points[1] - 1; j++)
  {
    eta = (double)(j) * dnym1;
    for (i = 0; i <= grid_points[0] - 1; i++)
    {
      xi = (double)(i) * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++)
      {
        u[k][j][i][m] = temp[m];
      }
    }
  }
  //---------------------------------------------------------------------
  // top face
  //---------------------------------------------------------------------
  k = grid_points[2] - 1;
  zeta = 1.0;
  #pragma omp parallel for default(shared) private(j, i, m, eta, xi) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, temp)
  for (j = 0; j <= grid_points[1] - 1; j++)
  {
    eta = (double)(j) * dnym1;
    for (i = 0; i <= grid_points[0] - 1; i++)
    {
      xi = (double)(i) * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++)
      {
        u[k][j][i][m] = temp[m];
      }
    }
  }
}
//---------------------------------------------------------------------
// initialize the boundary cells (index 0 and index size) of a
// block-tridiagonal left hand side: zero all three 5x5 blocks of both
// boundary cells, then place the identity on the middle (diagonal)
// block so the boundary rows are trivial identity equations
//---------------------------------------------------------------------
void lhsinit(double lhs[][3][5][5], int size)
{
  int e, d, m, n, row;
  int ends[2];

  ends[0] = 0;
  ends[1] = size;

  //---------------------------------------------------------------------
  // zero the whole left hand side for starters
  //---------------------------------------------------------------------
  for (e = 0; e < 2; e++)
  {
    d = ends[e];
    for (n = 0; n < 5; n++)
    {
      for (m = 0; m < 5; m++)
      {
        for (row = 0; row < 3; row++)
        {
          lhs[d][row][n][m] = 0.0;
        }
      }
    }
  }

  //---------------------------------------------------------------------
  // next, set all diagonal values to 1. This is overkill, but convenient
  //---------------------------------------------------------------------
  for (e = 0; e < 2; e++)
  {
    d = ends[e];
    for (m = 0; m < 5; m++)
    {
      lhs[d][1][m][m] = 1.0;
    }
  }
}
//---------------------------------------------------------------------
// compute the right hand side: rhs = forcing plus the central
// flux differences and fourth-order dissipation in each of the three
// coordinate directions, finally scaled by the time step dt
//---------------------------------------------------------------------
void compute_rhs()
{
  int i, j, k, m;
  double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;
  //---------------------------------------------------------------------
  // compute the reciprocal of density, and the kinetic energy,
  // and the speed of sound.
  //---------------------------------------------------------------------
  // fills the auxiliary grids rho_i (1/rho), us/vs/ws (velocities),
  // square (0.5*rho*|v|^2 scaled by 1/rho) and qs (square/rho)
  #pragma omp parallel for default(shared) private(k, j, i, rho_inv) firstprivate(grid_points, u)
  for (k = 0; k <= grid_points[2] - 1; k++)
  {
    for (j = 0; j <= grid_points[1] - 1; j++)
    {
      for (i = 0; i <= grid_points[0] - 1; i++)
      {
        rho_inv = 1.0 / u[k][j][i][0];
        rho_i[k][j][i] = rho_inv;
        us[k][j][i] = u[k][j][i][1] * rho_inv;
        vs[k][j][i] = u[k][j][i][2] * rho_inv;
        ws[k][j][i] = u[k][j][i][3] * rho_inv;
        square[k][j][i] = 0.5 * (
                            u[k][j][i][1] * u[k][j][i][1] +
                            u[k][j][i][2] * u[k][j][i][2] +
                            u[k][j][i][3] * u[k][j][i][3] ) * rho_inv;
        qs[k][j][i] = square[k][j][i] * rho_inv;
      }
    }
  }
  //---------------------------------------------------------------------
  // copy the exact forcing term to the right hand side; because
  // this forcing term is known, we can store it on the whole grid
  // including the boundary
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points, forcing)
  for (k = 0; k <= grid_points[2] - 1; k++)
  {
    for (j = 0; j <= grid_points[1] - 1; j++)
    {
      for (i = 0; i <= grid_points[0] - 1; i++)
      {
        for (m = 0; m < 5; m++)
        {
          rhs[k][j][i][m] = forcing[k][j][i][m];
        }
      }
    }
  }
  //---------------------------------------------------------------------
  // compute xi-direction fluxes
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(k, j, i, m, uijk, up1, um1) firstprivate(dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, dssp, grid_points, us, u, square, vs, ws, qs, rho_i)
  for (k = 1; k <= grid_points[2] - 2; k++)
  {
    for (j = 1; j <= grid_points[1] - 2; j++)
    {
      for (i = 1; i <= grid_points[0] - 2; i++)
      {
        uijk = us[k][j][i];
        up1 = us[k][j][i + 1];
        um1 = us[k][j][i - 1];
        rhs[k][j][i][0] = rhs[k][j][i][0] + dx1tx1 *
                          (u[k][j][i + 1][0] - 2.0 * u[k][j][i][0] +
                           u[k][j][i - 1][0]) -
                          tx2 * (u[k][j][i + 1][1] - u[k][j][i - 1][1]);
        rhs[k][j][i][1] = rhs[k][j][i][1] + dx2tx1 *
                          (u[k][j][i + 1][1] - 2.0 * u[k][j][i][1] +
                           u[k][j][i - 1][1]) +
                          xxcon2 * con43 * (up1 - 2.0 * uijk + um1) -
                          tx2 * (u[k][j][i + 1][1] * up1 -
                                 u[k][j][i - 1][1] * um1 +
                                 (u[k][j][i + 1][4] - square[k][j][i + 1] -
                                  u[k][j][i - 1][4] + square[k][j][i - 1]) *
                                 c2);
        rhs[k][j][i][2] = rhs[k][j][i][2] + dx3tx1 *
                          (u[k][j][i + 1][2] - 2.0 * u[k][j][i][2] +
                           u[k][j][i - 1][2]) +
                          xxcon2 * (vs[k][j][i + 1] - 2.0 * vs[k][j][i] +
                                    vs[k][j][i - 1]) -
                          tx2 * (u[k][j][i + 1][2] * up1 -
                                 u[k][j][i - 1][2] * um1);
        rhs[k][j][i][3] = rhs[k][j][i][3] + dx4tx1 *
                          (u[k][j][i + 1][3] - 2.0 * u[k][j][i][3] +
                           u[k][j][i - 1][3]) +
                          xxcon2 * (ws[k][j][i + 1] - 2.0 * ws[k][j][i] +
                                    ws[k][j][i - 1]) -
                          tx2 * (u[k][j][i + 1][3] * up1 -
                                 u[k][j][i - 1][3] * um1);
        rhs[k][j][i][4] = rhs[k][j][i][4] + dx5tx1 *
                          (u[k][j][i + 1][4] - 2.0 * u[k][j][i][4] +
                           u[k][j][i - 1][4]) +
                          xxcon3 * (qs[k][j][i + 1] - 2.0 * qs[k][j][i] +
                                    qs[k][j][i - 1]) +
                          xxcon4 * (up1 * up1 - 2.0 * uijk * uijk +
                                    um1 * um1) +
                          xxcon5 * (u[k][j][i + 1][4] * rho_i[k][j][i + 1] -
                                    2.0 * u[k][j][i][4] * rho_i[k][j][i] +
                                    u[k][j][i - 1][4] * rho_i[k][j][i - 1]) -
                          tx2 * ( (c1 * u[k][j][i + 1][4] -
                                   c2 * square[k][j][i + 1]) * up1 -
                                  (c1 * u[k][j][i - 1][4] -
                                   c2 * square[k][j][i - 1]) * um1 );
      }
    }
    //---------------------------------------------------------------------
    // add fourth order xi-direction dissipation
    // (one-sided stencils near the boundaries, five-point in between)
    //---------------------------------------------------------------------
    for (j = 1; j <= grid_points[1] - 2; j++)
    {
      i = 1;
      for (m = 0; m < 5; m++)
      {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                          ( 5.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] +
                            u[k][j][i + 2][m]);
      }
      i = 2;
      for (m = 0; m < 5; m++)
      {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                          (-4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] -
                           4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]);
      }
    }
    for (j = 1; j <= grid_points[1] - 2; j++)
    {
      for (i = 3; i <= grid_points[0] - 4; i++)
      {
        for (m = 0; m < 5; m++)
        {
          rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                            ( u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] +
                              6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] +
                              u[k][j][i + 2][m] );
        }
      }
    }
    for (j = 1; j <= grid_points[1] - 2; j++)
    {
      i = grid_points[0] - 3;
      for (m = 0; m < 5; m++)
      {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                          ( u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] +
                            6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] );
      }
      i = grid_points[0] - 2;
      for (m = 0; m < 5; m++)
      {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                          ( u[k][j][i - 2][m] - 4.*u[k][j][i - 1][m] +
                            5.*u[k][j][i][m] );
      }
    }
  }
  //---------------------------------------------------------------------
  // compute eta-direction fluxes
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(k, j, i, m, vijk, vp1, vm1) firstprivate(dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, dssp, grid_points, vs, u, us, square, ws, qs, rho_i)
  for (k = 1; k <= grid_points[2] - 2; k++)
  {
    for (j = 1; j <= grid_points[1] - 2; j++)
    {
      for (i = 1; i <= grid_points[0] - 2; i++)
      {
        vijk = vs[k][j][i];
        vp1 = vs[k][j + 1][i];
        vm1 = vs[k][j - 1][i];
        rhs[k][j][i][0] = rhs[k][j][i][0] + dy1ty1 *
                          (u[k][j + 1][i][0] - 2.0 * u[k][j][i][0] +
                           u[k][j - 1][i][0]) -
                          ty2 * (u[k][j + 1][i][2] - u[k][j - 1][i][2]);
        rhs[k][j][i][1] = rhs[k][j][i][1] + dy2ty1 *
                          (u[k][j + 1][i][1] - 2.0 * u[k][j][i][1] +
                           u[k][j - 1][i][1]) +
                          yycon2 * (us[k][j + 1][i] - 2.0 * us[k][j][i] +
                                    us[k][j - 1][i]) -
                          ty2 * (u[k][j + 1][i][1] * vp1 -
                                 u[k][j - 1][i][1] * vm1);
        rhs[k][j][i][2] = rhs[k][j][i][2] + dy3ty1 *
                          (u[k][j + 1][i][2] - 2.0 * u[k][j][i][2] +
                           u[k][j - 1][i][2]) +
                          yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) -
                          ty2 * (u[k][j + 1][i][2] * vp1 -
                                 u[k][j - 1][i][2] * vm1 +
                                 (u[k][j + 1][i][4] - square[k][j + 1][i] -
                                  u[k][j - 1][i][4] + square[k][j - 1][i])
                                 * c2);
        rhs[k][j][i][3] = rhs[k][j][i][3] + dy4ty1 *
                          (u[k][j + 1][i][3] - 2.0 * u[k][j][i][3] +
                           u[k][j - 1][i][3]) +
                          yycon2 * (ws[k][j + 1][i] - 2.0 * ws[k][j][i] +
                                    ws[k][j - 1][i]) -
                          ty2 * (u[k][j + 1][i][3] * vp1 -
                                 u[k][j - 1][i][3] * vm1);
        rhs[k][j][i][4] = rhs[k][j][i][4] + dy5ty1 *
                          (u[k][j + 1][i][4] - 2.0 * u[k][j][i][4] +
                           u[k][j - 1][i][4]) +
                          yycon3 * (qs[k][j + 1][i] - 2.0 * qs[k][j][i] +
                                    qs[k][j - 1][i]) +
                          yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk +
                                    vm1 * vm1) +
                          yycon5 * (u[k][j + 1][i][4] * rho_i[k][j + 1][i] -
                                    2.0 * u[k][j][i][4] * rho_i[k][j][i] +
                                    u[k][j - 1][i][4] * rho_i[k][j - 1][i]) -
                          ty2 * ((c1 * u[k][j + 1][i][4] -
                                  c2 * square[k][j + 1][i]) * vp1 -
                                 (c1 * u[k][j - 1][i][4] -
                                  c2 * square[k][j - 1][i]) * vm1);
      }
    }
    //---------------------------------------------------------------------
    // add fourth order eta-direction dissipation
    //---------------------------------------------------------------------
    j = 1;
    for (i = 1; i <= grid_points[0] - 2; i++)
    {
      for (m = 0; m < 5; m++)
      {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                          ( 5.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] +
                            u[k][j + 2][i][m]);
      }
    }
    j = 2;
    for (i = 1; i <= grid_points[0] - 2; i++)
    {
      for (m = 0; m < 5; m++)
      {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                          (-4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] -
                           4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]);
      }
    }
    for (j = 3; j <= grid_points[1] - 4; j++)
    {
      for (i = 1; i <= grid_points[0] - 2; i++)
      {
        for (m = 0; m < 5; m++)
        {
          rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                            ( u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] +
                              6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] +
                              u[k][j + 2][i][m] );
        }
      }
    }
    j = grid_points[1] - 3;
    for (i = 1; i <= grid_points[0] - 2; i++)
    {
      for (m = 0; m < 5; m++)
      {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                          ( u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] +
                            6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] );
      }
    }
    j = grid_points[1] - 2;
    for (i = 1; i <= grid_points[0] - 2; i++)
    {
      for (m = 0; m < 5; m++)
      {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                          ( u[k][j - 2][i][m] - 4.*u[k][j - 1][i][m] +
                            5.*u[k][j][i][m] );
      }
    }
  }
  //---------------------------------------------------------------------
  // compute zeta-direction fluxes
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(k, j, i, wijk, wp1, wm1) firstprivate(dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, grid_points, ws, u, us, vs, square, qs, rho_i)
  for (k = 1; k <= grid_points[2] - 2; k++)
  {
    for (j = 1; j <= grid_points[1] - 2; j++)
    {
      for (i = 1; i <= grid_points[0] - 2; i++)
      {
        wijk = ws[k][j][i];
        wp1 = ws[k + 1][j][i];
        wm1 = ws[k - 1][j][i];
        rhs[k][j][i][0] = rhs[k][j][i][0] + dz1tz1 *
                          (u[k + 1][j][i][0] - 2.0 * u[k][j][i][0] +
                           u[k - 1][j][i][0]) -
                          tz2 * (u[k + 1][j][i][3] - u[k - 1][j][i][3]);
        rhs[k][j][i][1] = rhs[k][j][i][1] + dz2tz1 *
                          (u[k + 1][j][i][1] - 2.0 * u[k][j][i][1] +
                           u[k - 1][j][i][1]) +
                          zzcon2 * (us[k + 1][j][i] - 2.0 * us[k][j][i] +
                                    us[k - 1][j][i]) -
                          tz2 * (u[k + 1][j][i][1] * wp1 -
                                 u[k - 1][j][i][1] * wm1);
        rhs[k][j][i][2] = rhs[k][j][i][2] + dz3tz1 *
                          (u[k + 1][j][i][2] - 2.0 * u[k][j][i][2] +
                           u[k - 1][j][i][2]) +
                          zzcon2 * (vs[k + 1][j][i] - 2.0 * vs[k][j][i] +
                                    vs[k - 1][j][i]) -
                          tz2 * (u[k + 1][j][i][2] * wp1 -
                                 u[k - 1][j][i][2] * wm1);
        rhs[k][j][i][3] = rhs[k][j][i][3] + dz4tz1 *
                          (u[k + 1][j][i][3] - 2.0 * u[k][j][i][3] +
                           u[k - 1][j][i][3]) +
                          zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) -
                          tz2 * (u[k + 1][j][i][3] * wp1 -
                                 u[k - 1][j][i][3] * wm1 +
                                 (u[k + 1][j][i][4] - square[k + 1][j][i] -
                                  u[k - 1][j][i][4] + square[k - 1][j][i])
                                 * c2);
        rhs[k][j][i][4] = rhs[k][j][i][4] + dz5tz1 *
                          (u[k + 1][j][i][4] - 2.0 * u[k][j][i][4] +
                           u[k - 1][j][i][4]) +
                          zzcon3 * (qs[k + 1][j][i] - 2.0 * qs[k][j][i] +
                                    qs[k - 1][j][i]) +
                          zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk +
                                    wm1 * wm1) +
                          zzcon5 * (u[k + 1][j][i][4] * rho_i[k + 1][j][i] -
                                    2.0 * u[k][j][i][4] * rho_i[k][j][i] +
                                    u[k - 1][j][i][4] * rho_i[k - 1][j][i]) -
                          tz2 * ( (c1 * u[k + 1][j][i][4] -
                                   c2 * square[k + 1][j][i]) * wp1 -
                                  (c1 * u[k - 1][j][i][4] -
                                   c2 * square[k - 1][j][i]) * wm1);
      }
    }
  }
  //---------------------------------------------------------------------
  // add fourth order zeta-direction dissipation
  // (separate parallel regions per k-plane because the one-sided
  // boundary stencils fix k outside the parallel loops)
  //---------------------------------------------------------------------
  k = 1;
  #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
  for (j = 1; j <= grid_points[1] - 2; j++)
  {
    for (i = 1; i <= grid_points[0] - 2; i++)
    {
      for (m = 0; m < 5; m++)
      {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                          ( 5.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] +
                            u[k + 2][j][i][m]);
      }
    }
  }
  k = 2;
  #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
  for (j = 1; j <= grid_points[1] - 2; j++)
  {
    for (i = 1; i <= grid_points[0] - 2; i++)
    {
      for (m = 0; m < 5; m++)
      {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                          (-4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] -
                           4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]);
      }
    }
  }
  #pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(dssp, grid_points, u)
  for (k = 3; k <= grid_points[2] - 4; k++)
  {
    for (j = 1; j <= grid_points[1] - 2; j++)
    {
      for (i = 1; i <= grid_points[0] - 2; i++)
      {
        for (m = 0; m < 5; m++)
        {
          rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                            ( u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] +
                              6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] +
                              u[k + 2][j][i][m] );
        }
      }
    }
  }
  k = grid_points[2] - 3;
  #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
  for (j = 1; j <= grid_points[1] - 2; j++)
  {
    for (i = 1; i <= grid_points[0] - 2; i++)
    {
      for (m = 0; m < 5; m++)
      {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                          ( u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] +
                            6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] );
      }
    }
  }
  k = grid_points[2] - 2;
  #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
  for (j = 1; j <= grid_points[1] - 2; j++)
  {
    for (i = 1; i <= grid_points[0] - 2; i++)
    {
      for (m = 0; m < 5; m++)
      {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
                          ( u[k - 2][j][i][m] - 4.*u[k - 1][j][i][m] +
                            5.*u[k][j][i][m] );
      }
    }
  }
  //---------------------------------------------------------------------
  // finally, scale the whole interior right hand side by the time step
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(dt, grid_points)
  for (k = 1; k <= grid_points[2] - 2; k++)
  {
    for (j = 1; j <= grid_points[1] - 2; j++)
    {
      for (i = 1; i <= grid_points[0] - 2; i++)
      {
        for (m = 0; m < 5; m++)
        {
          rhs[k][j][i][m] = rhs[k][j][i][m] * dt;
        }
      }
    }
  }
}
//---------------------------------------------------------------------
// set all the global constants used by the solver: the exact-solution
// polynomial coefficients (ce), mesh spacings, dissipation and
// flux-term coefficients.  Must be called after grid_points and dt
// have been set, since the derived constants read them.
//---------------------------------------------------------------------
void set_constants()
{
  // ce[m][0..12]: coefficients of the exact-solution polynomial for
  // component m (constant term, then four coefficients per coordinate
  // direction — see exact_solution for the exact layout)
  ce[0][0] = 2.0;
  ce[0][1] = 0.0;
  ce[0][2] = 0.0;
  ce[0][3] = 4.0;
  ce[0][4] = 5.0;
  ce[0][5] = 3.0;
  ce[0][6] = 0.5;
  ce[0][7] = 0.02;
  ce[0][8] = 0.01;
  ce[0][9] = 0.03;
  ce[0][10] = 0.5;
  ce[0][11] = 0.4;
  ce[0][12] = 0.3;
  ce[1][0] = 1.0;
  ce[1][1] = 0.0;
  ce[1][2] = 0.0;
  ce[1][3] = 0.0;
  ce[1][4] = 1.0;
  ce[1][5] = 2.0;
  ce[1][6] = 3.0;
  ce[1][7] = 0.01;
  ce[1][8] = 0.03;
  ce[1][9] = 0.02;
  ce[1][10] = 0.4;
  ce[1][11] = 0.3;
  ce[1][12] = 0.5;
  ce[2][0] = 2.0;
  ce[2][1] = 2.0;
  ce[2][2] = 0.0;
  ce[2][3] = 0.0;
  ce[2][4] = 0.0;
  ce[2][5] = 2.0;
  ce[2][6] = 3.0;
  ce[2][7] = 0.04;
  ce[2][8] = 0.03;
  ce[2][9] = 0.05;
  ce[2][10] = 0.3;
  ce[2][11] = 0.5;
  ce[2][12] = 0.4;
  ce[3][0] = 2.0;
  ce[3][1] = 2.0;
  ce[3][2] = 0.0;
  ce[3][3] = 0.0;
  ce[3][4] = 0.0;
  ce[3][5] = 2.0;
  ce[3][6] = 3.0;
  ce[3][7] = 0.03;
  ce[3][8] = 0.05;
  ce[3][9] = 0.04;
  ce[3][10] = 0.2;
  ce[3][11] = 0.1;
  ce[3][12] = 0.3;
  ce[4][0] = 5.0;
  ce[4][1] = 4.0;
  ce[4][2] = 3.0;
  ce[4][3] = 2.0;
  ce[4][4] = 0.1;
  ce[4][5] = 0.4;
  ce[4][6] = 0.3;
  ce[4][7] = 0.05;
  ce[4][8] = 0.04;
  ce[4][9] = 0.03;
  ce[4][10] = 0.1;
  ce[4][11] = 0.3;
  ce[4][12] = 0.2;
  // flow/scheme constants (c1, c2 appear in the pressure-like terms of
  // the flux routines; c3..c5 only feed the derived products below)
  c1 = 1.4;
  c2 = 0.4;
  c3 = 0.1;
  c4 = 1.0;
  c5 = 1.4;
  // mesh spacing: reciprocal of the number of intervals per direction
  dnxm1 = 1.0 / (double)(grid_points[0] - 1);
  dnym1 = 1.0 / (double)(grid_points[1] - 1);
  dnzm1 = 1.0 / (double)(grid_points[2] - 1);
  c1c2 = c1 * c2;
  c1c5 = c1 * c5;
  c3c4 = c3 * c4;
  c1345 = c1c5 * c3c4;
  conz1 = (1.0 - c1c5);
  // tx/ty/tz: inverse powers of the mesh spacing used by the
  // difference stencils (t?1 ~ 1/h^2, t?2 ~ 1/(2h), t?3 ~ 1/h)
  tx1 = 1.0 / (dnxm1 * dnxm1);
  tx2 = 1.0 / (2.0 * dnxm1);
  tx3 = 1.0 / dnxm1;
  ty1 = 1.0 / (dnym1 * dnym1);
  ty2 = 1.0 / (2.0 * dnym1);
  ty3 = 1.0 / dnym1;
  tz1 = 1.0 / (dnzm1 * dnzm1);
  tz2 = 1.0 / (2.0 * dnzm1);
  tz3 = 1.0 / dnzm1;
  // per-direction diffusion coefficients
  dx1 = 0.75;
  dx2 = 0.75;
  dx3 = 0.75;
  dx4 = 0.75;
  dx5 = 0.75;
  dy1 = 0.75;
  dy2 = 0.75;
  dy3 = 0.75;
  dy4 = 0.75;
  dy5 = 0.75;
  dz1 = 1.0;
  dz2 = 1.0;
  dz3 = 1.0;
  dz4 = 1.0;
  dz5 = 1.0;
  dxmax = max(dx3, dx4);
  dymax = max(dy2, dy4);
  dzmax = max(dz2, dz3);
  // fourth-order artificial dissipation coefficient (used as dssp in
  // exact_rhs and compute_rhs)
  dssp = 0.25 * max(dx1, max(dy1, dz1) );
  c4dssp = 4.0 * dssp;
  c5dssp = 5.0 * dssp;
  // dt-scaled difference coefficients
  dttx1 = dt * tx1;
  dttx2 = dt * tx2;
  dtty1 = dt * ty1;
  dtty2 = dt * ty2;
  dttz1 = dt * tz1;
  dttz2 = dt * tz2;
  c2dttx1 = 2.0 * dttx1;
  c2dtty1 = 2.0 * dtty1;
  c2dttz1 = 2.0 * dttz1;
  // dt-scaled dissipation constants
  dtdssp = dt * dssp;
  comz1 = dtdssp;
  comz4 = 4.0 * dtdssp;
  comz5 = 5.0 * dtdssp;
  comz6 = 6.0 * dtdssp;
  c3c4tx3 = c3c4 * tx3;
  c3c4ty3 = c3c4 * ty3;
  c3c4tz3 = c3c4 * tz3;
  dx1tx1 = dx1 * tx1;
  dx2tx1 = dx2 * tx1;
  dx3tx1 = dx3 * tx1;
  dx4tx1 = dx4 * tx1;
  dx5tx1 = dx5 * tx1;
  dy1ty1 = dy1 * ty1;
  dy2ty1 = dy2 * ty1;
  dy3ty1 = dy3 * ty1;
  dy4ty1 = dy4 * ty1;
  dy5ty1 = dy5 * ty1;
  dz1tz1 = dz1 * tz1;
  dz2tz1 = dz2 * tz1;
  dz3tz1 = dz3 * tz1;
  dz4tz1 = dz4 * tz1;
  dz5tz1 = dz5 * tz1;
  c2iv = 2.5;
  con43 = 4.0 / 3.0;
  con16 = 1.0 / 6.0;
  // derived coefficients for the second-difference (viscous-style)
  // terms of the flux routines, one family per direction
  xxcon1 = c3c4tx3 * con43 * tx3;
  xxcon2 = c3c4tx3 * tx3;
  xxcon3 = c3c4tx3 * conz1 * tx3;
  xxcon4 = c3c4tx3 * con16 * tx3;
  xxcon5 = c3c4tx3 * c1c5 * tx3;
  yycon1 = c3c4ty3 * con43 * ty3;
  yycon2 = c3c4ty3 * ty3;
  yycon3 = c3c4ty3 * conz1 * ty3;
  yycon4 = c3c4ty3 * con16 * ty3;
  yycon5 = c3c4ty3 * c1c5 * ty3;
  zzcon1 = c3c4tz3 * con43 * tz3;
  zzcon2 = c3c4tz3 * tz3;
  zzcon3 = c3c4tz3 * conz1 * tz3;
  zzcon4 = c3c4tz3 * con16 * tz3;
  zzcon5 = c3c4tz3 * c1c5 * tz3;
}
//---------------------------------------------------------------------
// bvec = bvec - ablock * avec
//
// 5x5 block times 5-vector, subtracted from bvec in place.
// The block is indexed ablock[col][row]: entry ablock[col][row]
// multiplies avec[col] and contributes to bvec[row].
//---------------------------------------------------------------------
void matvec_sub(double ablock[5][5], double avec[5], double bvec[5])
{
  int row, col;

  for (row = 0; row < 5; row++)
  {
    // accumulate -ablock(:,row) . avec, one column at a time
    for (col = 0; col < 5; col++)
    {
      bvec[row] = bvec[row] - ablock[col][row] * avec[col];
    }
  }
}
//---------------------------------------------------------------------
// cblock = cblock - ablock * bblock
//
// 5x5 block matrix product subtracted from cblock in place.
// Blocks are indexed [col][row]; the inner sum over k computes
// (ablock * bblock)[col][row] = sum_k ablock[k][row] * bblock[col][k].
//---------------------------------------------------------------------
void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5])
{
  int col, row, k;

  for (col = 0; col < 5; col++)
  {
    for (row = 0; row < 5; row++)
    {
      for (k = 0; k < 5; k++)
      {
        cblock[col][row] = cblock[col][row] - ablock[k][row] * bblock[col][k];
      }
    }
  }
}
//---------------------------------------------------------------------
// In-place Gauss-Jordan elimination on one 5x5 block row.
//
// Normalizes and eliminates lhs (indexed [col][row]), applying the
// identical row operations to the coupling block c and the right-hand
// side r.  On return r holds the solution of the original lhs system
// and c holds the correspondingly transformed coupling block.
//
// No pivot search is performed: lhs[p][p] is assumed nonzero, as in
// the original unrolled code.  The loop nest below reproduces the
// exact operation sequence of that unrolled version, so results are
// bit-identical.
//---------------------------------------------------------------------
void binvcrhs(double lhs[5][5], double c[5][5], double r[5])
{
  double pivot, coeff;
  int p, m, n, j;

  for (p = 0; p < 5; p++)
  {
    //-------------------------------------------------------------------
    // scale pivot row p by 1/lhs[p][p]
    //-------------------------------------------------------------------
    pivot = 1.00 / lhs[p][p];
    for (j = p + 1; j < 5; j++)
    {
      lhs[j][p] = lhs[j][p] * pivot;
    }
    for (n = 0; n < 5; n++)
    {
      c[n][p] = c[n][p] * pivot;
    }
    r[p] = r[p] * pivot;

    //-------------------------------------------------------------------
    // eliminate column p from every other row m
    //-------------------------------------------------------------------
    for (m = 0; m < 5; m++)
    {
      if (m == p) continue;
      coeff = lhs[p][m];
      for (j = p + 1; j < 5; j++)
      {
        lhs[j][m] = lhs[j][m] - coeff * lhs[j][p];
      }
      for (n = 0; n < 5; n++)
      {
        c[n][m] = c[n][m] - coeff * c[n][p];
      }
      r[m] = r[m] - coeff * r[p];
    }
  }
}
//---------------------------------------------------------------------
// In-place Gauss-Jordan elimination on one 5x5 block row, right-hand
// side only.
//
// Same elimination as binvcrhs but without a coupling block: on
// return r holds the solution of the original lhs system, and lhs is
// left in its eliminated state.  No pivot search is performed;
// lhs[p][p] is assumed nonzero.  The loop nest reproduces the exact
// operation sequence of the original unrolled code.
//---------------------------------------------------------------------
void binvrhs(double lhs[5][5], double r[5])
{
  double pivot, coeff;
  int p, m, j;

  for (p = 0; p < 5; p++)
  {
    // scale pivot row p by 1/lhs[p][p]
    pivot = 1.00 / lhs[p][p];
    for (j = p + 1; j < 5; j++)
    {
      lhs[j][p] = lhs[j][p] * pivot;
    }
    r[p] = r[p] * pivot;

    // eliminate column p from every other row m
    for (m = 0; m < 5; m++)
    {
      if (m == p) continue;
      coeff = lhs[p][m];
      for (j = p + 1; j < 5; j++)
      {
        lhs[j][m] = lhs[j][m] - coeff * lhs[j][p];
      }
      r[m] = r[m] - coeff * r[p];
    }
  }
}
//---------------------------------------------------------------------
// verification routine
//
// Checks the computed solution against hard-coded reference values
// for the known problem classes (S, W, A, B, C, D, E), selected by
// grid size and number of time steps.  On return, *Class holds the
// matched class letter ('U' if no known configuration matched) and
// *verified is 1 only if dt and all ten RMS norms are within the
// tolerance of their references.
//
// NOTE(review): depends on file-scope globals (grid_points, dt) and
// on error_norm/compute_rhs/rhs_norm defined elsewhere in this file.
//---------------------------------------------------------------------
void verify(int no_time_steps, char *Class, int *verified)
{
  double xcrref[5], xceref[5], xcrdif[5], xcedif[5];
  double epsilon, xce[5], xcr[5], dtref = 0.0;
  int m;

  //---------------------------------------------------------------------
  // tolerance level
  //---------------------------------------------------------------------
  epsilon = 1.0e-08;

  //---------------------------------------------------------------------
  // compute the error norm and the residual norm, and exit if not printing
  //---------------------------------------------------------------------
  error_norm(xce);
  compute_rhs();
  rhs_norm(xcr);

  // residual norms are reported per unit time step
  for (m = 0; m < 5; m++)
  {
    xcr[m] = xcr[m] / dt;
  }

  // start from "unknown class, assumed verified"; the branches and the
  // comparison loops below overwrite these as appropriate
  *Class = 'U';
  *verified = 1;
  for (m = 0; m < 5; m++)
  {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }

  //---------------------------------------------------------------------
  // reference data for 12X12X12 grids after 60 time steps, with DT = 1.0e-02
  //---------------------------------------------------------------------
  if ( (grid_points[0] == 12) && (grid_points[1] == 12) &&
       (grid_points[2] == 12) && (no_time_steps == 60))
  {
    *Class = 'S';
    dtref = 1.0e-2;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 1.7034283709541311e-01;
    xcrref[1] = 1.2975252070034097e-02;
    xcrref[2] = 3.2527926989486055e-02;
    xcrref[3] = 2.6436421275166801e-02;
    xcrref[4] = 1.9211784131744430e-01;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 4.9976913345811579e-04;
    xceref[1] = 4.5195666782961927e-05;
    xceref[2] = 7.3973765172921357e-05;
    xceref[3] = 7.3821238632439731e-05;
    xceref[4] = 8.9269630987491446e-04;
    //---------------------------------------------------------------------
    // reference data for 24X24X24 grids after 200 time steps,
    // with DT = 0.8e-3
    //---------------------------------------------------------------------
  }
  else if ( (grid_points[0] == 24) && (grid_points[1] == 24) &&
            (grid_points[2] == 24) && (no_time_steps == 200) )
  {
    *Class = 'W';
    dtref = 0.8e-3;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 0.1125590409344e+03;
    xcrref[1] = 0.1180007595731e+02;
    xcrref[2] = 0.2710329767846e+02;
    xcrref[3] = 0.2469174937669e+02;
    xcrref[4] = 0.2638427874317e+03;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 0.4419655736008e+01;
    xceref[1] = 0.4638531260002e+00;
    xceref[2] = 0.1011551749967e+01;
    xceref[3] = 0.9235878729944e+00;
    xceref[4] = 0.1018045837718e+02;
    //---------------------------------------------------------------------
    // reference data for 64X64X64 grids after 200 time steps,
    // with DT = 0.8e-3
    //---------------------------------------------------------------------
  }
  else if ( (grid_points[0] == 64) && (grid_points[1] == 64) &&
            (grid_points[2] == 64) && (no_time_steps == 200) )
  {
    *Class = 'A';
    dtref = 0.8e-3;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 1.0806346714637264e+02;
    xcrref[1] = 1.1319730901220813e+01;
    xcrref[2] = 2.5974354511582465e+01;
    xcrref[3] = 2.3665622544678910e+01;
    xcrref[4] = 2.5278963211748344e+02;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 4.2348416040525025e+00;
    xceref[1] = 4.4390282496995698e-01;
    xceref[2] = 9.6692480136345650e-01;
    xceref[3] = 8.8302063039765474e-01;
    xceref[4] = 9.7379901770829278e+00;
    //---------------------------------------------------------------------
    // reference data for 102X102X102 grids after 200 time steps,
    // with DT = 3.0e-04
    //---------------------------------------------------------------------
  }
  else if ( (grid_points[0] == 102) && (grid_points[1] == 102) &&
            (grid_points[2] == 102) && (no_time_steps == 200) )
  {
    *Class = 'B';
    dtref = 3.0e-4;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 1.4233597229287254e+03;
    xcrref[1] = 9.9330522590150238e+01;
    xcrref[2] = 3.5646025644535285e+02;
    xcrref[3] = 3.2485447959084092e+02;
    xcrref[4] = 3.2707541254659363e+03;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 5.2969847140936856e+01;
    xceref[1] = 4.4632896115670668e+00;
    xceref[2] = 1.3122573342210174e+01;
    xceref[3] = 1.2006925323559144e+01;
    xceref[4] = 1.2459576151035986e+02;
    //---------------------------------------------------------------------
    // reference data for 162X162X162 grids after 200 time steps,
    // with DT = 1.0e-04
    //---------------------------------------------------------------------
  }
  else if ( (grid_points[0] == 162) && (grid_points[1] == 162) &&
            (grid_points[2] == 162) && (no_time_steps == 200) )
  {
    *Class = 'C';
    dtref = 1.0e-4;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 0.62398116551764615e+04;
    xcrref[1] = 0.50793239190423964e+03;
    xcrref[2] = 0.15423530093013596e+04;
    xcrref[3] = 0.13302387929291190e+04;
    xcrref[4] = 0.11604087428436455e+05;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 0.16462008369091265e+03;
    xceref[1] = 0.11497107903824313e+02;
    xceref[2] = 0.41207446207461508e+02;
    xceref[3] = 0.37087651059694167e+02;
    xceref[4] = 0.36211053051841265e+03;
    //---------------------------------------------------------------------
    // reference data for 408x408x408 grids after 250 time steps,
    // with DT = 0.2e-04
    //---------------------------------------------------------------------
  }
  else if ( (grid_points[0] == 408) && (grid_points[1] == 408) &&
            (grid_points[2] == 408) && (no_time_steps == 250) )
  {
    *Class = 'D';
    dtref = 0.2e-4;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 0.2533188551738e+05;
    xcrref[1] = 0.2346393716980e+04;
    xcrref[2] = 0.6294554366904e+04;
    xcrref[3] = 0.5352565376030e+04;
    xcrref[4] = 0.3905864038618e+05;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 0.3100009377557e+03;
    xceref[1] = 0.2424086324913e+02;
    xceref[2] = 0.7782212022645e+02;
    xceref[3] = 0.6835623860116e+02;
    xceref[4] = 0.6065737200368e+03;
    //---------------------------------------------------------------------
    // reference data for 1020x1020x1020 grids after 250 time steps,
    // with DT = 0.4e-05
    //---------------------------------------------------------------------
  }
  else if ( (grid_points[0] == 1020) && (grid_points[1] == 1020) &&
            (grid_points[2] == 1020) && (no_time_steps == 250) )
  {
    *Class = 'E';
    dtref = 0.4e-5;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of residual.
    //---------------------------------------------------------------------
    xcrref[0] = 0.9795372484517e+05;
    xcrref[1] = 0.9739814511521e+04;
    xcrref[2] = 0.2467606342965e+05;
    xcrref[3] = 0.2092419572860e+05;
    xcrref[4] = 0.1392138856939e+06;
    //---------------------------------------------------------------------
    // Reference values of RMS-norms of solution error.
    //---------------------------------------------------------------------
    xceref[0] = 0.4327562208414e+03;
    xceref[1] = 0.3699051964887e+02;
    xceref[2] = 0.1089845040954e+03;
    xceref[3] = 0.9462517622043e+02;
    xceref[4] = 0.7765512765309e+03;
  }
  else
  {
    // grid size / step count matches no known class
    *verified = 0;
  }

  //---------------------------------------------------------------------
  // verification test for residuals if gridsize is one of
  // the defined grid sizes above (*Class != 'U')
  //---------------------------------------------------------------------

  //---------------------------------------------------------------------
  // Compute the difference of solution values and the known reference values.
  //---------------------------------------------------------------------
  // relative differences; for class 'U' the references are the dummy
  // value 1.0, so these are only meaningful when a class matched
  for (m = 0; m < 5; m++)
  {
    xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]);
    xcedif[m] = fabs((xce[m] - xceref[m]) / xceref[m]);
  }

  //---------------------------------------------------------------------
  // Output the comparison of computed results to known cases.
  //---------------------------------------------------------------------
  if (*Class != 'U')
  {
    printf(" Verification being performed for class %c\n", *Class);
    printf(" accuracy setting for epsilon = %20.13E\n", epsilon);
    // the run is only comparable to the reference data if it used the
    // reference time step; otherwise demote the class to 'U'
    *verified = (fabs(dt - dtref) <= epsilon);
    if (!(*verified))
    {
      *Class = 'U';
      printf(" DT does not match the reference value of %15.8E\n", dtref);
    }
  }
  else
  {
    printf(" Unknown class\n");
  }

  if (*Class != 'U')
  {
    printf(" Comparison of RMS-norms of residual\n");
  }
  else
  {
    printf(" RMS-norms of residual\n");
  }
  for (m = 0; m < 5; m++)
  {
    if (*Class == 'U')
    {
      // no references: just report the computed norms
      printf(" %2d%20.13E\n", m + 1, xcr[m]);
    }
    else if (xcrdif[m] <= epsilon)
    {
      printf(" %2d%20.13E%20.13E%20.13E\n",
             m + 1, xcr[m], xcrref[m], xcrdif[m]);
    }
    else
    {
      *verified = 0;
      printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n",
             m + 1, xcr[m], xcrref[m], xcrdif[m]);
    }
  }

  if (*Class != 'U')
  {
    printf(" Comparison of RMS-norms of solution error\n");
  }
  else
  {
    printf(" RMS-norms of solution error\n");
  }
  for (m = 0; m < 5; m++)
  {
    if (*Class == 'U')
    {
      printf(" %2d%20.13E\n", m + 1, xce[m]);
    }
    else if (xcedif[m] <= epsilon)
    {
      printf(" %2d%20.13E%20.13E%20.13E\n",
             m + 1, xce[m], xceref[m], xcedif[m]);
    }
    else
    {
      *verified = 0;
      printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n",
             m + 1, xce[m], xceref[m], xcedif[m]);
    }
  }

  if (*Class == 'U')
  {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  }
  else if (*verified)
  {
    printf(" Verification Successful\n");
  }
  else
  {
    printf(" Verification failed\n");
  }
}
//---------------------------------------------------------------------
//
// Performs line solves in X direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//
//---------------------------------------------------------------------
void x_solve()
{
int i, j, k, m, n, isize;
double fjac[PROBLEM_SIZE + 1][5][5];
double njac[PROBLEM_SIZE + 1][5][5];
double lhs [PROBLEM_SIZE + 1][3][5][5];
double tmp1, tmp2, tmp3;
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// This function computes the left hand side in the xi-direction
//---------------------------------------------------------------------
isize = grid_points[0] - 1;
//---------------------------------------------------------------------
// determine a (labeled f) and n jacobians
//---------------------------------------------------------------------
for (k = 1; k <= grid_points[2] - 2; k++)
{
for (j = 1; j <= grid_points[1] - 2; j++)
{
#pragma omp parallel for default(shared) private(i, tmp1, tmp2, tmp3) firstprivate(isize, k, j, c2, c1, con43, c3c4, c1345, rho_i, u, qs, square)
for (i = 0; i <= isize; i++)
{
tmp1 = rho_i[k][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
//-------------------------------------------------------------------
//
//-------------------------------------------------------------------
fjac[i][0][0] = 0.0;
fjac[i][1][0] = 1.0;
fjac[i][2][0] = 0.0;
fjac[i][3][0] = 0.0;
fjac[i][4][0] = 0.0;
fjac[i][0][1] = -(u[k][j][i][1] * tmp2 * u[k][j][i][1])
+ c2 * qs[k][j][i];
fjac[i][1][1] = ( 2.0 - c2 ) * ( u[k][j][i][1] / u[k][j][i][0] );
fjac[i][2][1] = - c2 * ( u[k][j][i][2] * tmp1 );
fjac[i][3][1] = - c2 * ( u[k][j][i][3] * tmp1 );
fjac[i][4][1] = c2;
fjac[i][0][2] = - ( u[k][j][i][1] * u[k][j][i][2] ) * tmp2;
fjac[i][1][2] = u[k][j][i][2] * tmp1;
fjac[i][2][2] = u[k][j][i][1] * tmp1;
fjac[i][3][2] = 0.0;
fjac[i][4][2] = 0.0;
fjac[i][0][3] = - ( u[k][j][i][1] * u[k][j][i][3] ) * tmp2;
fjac[i][1][3] = u[k][j][i][3] * tmp1;
fjac[i][2][3] = 0.0;
fjac[i][3][3] = u[k][j][i][1] * tmp1;
fjac[i][4][3] = 0.0;
fjac[i][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
* ( u[k][j][i][1] * tmp2 );
fjac[i][1][4] = c1 * u[k][j][i][4] * tmp1
- c2 * ( u[k][j][i][1] * u[k][j][i][1] * tmp2 + qs[k][j][i] );
fjac[i][2][4] = - c2 * ( u[k][j][i][2] * u[k][j][i][1] ) * tmp2;
fjac[i][3][4] = - c2 * ( u[k][j][i][3] * u[k][j][i][1] ) * tmp2;
fjac[i][4][4] = c1 * ( u[k][j][i][1] * tmp1 );
njac[i][0][0] = 0.0;
njac[i][1][0] = 0.0;
njac[i][2][0] = 0.0;
njac[i][3][0] = 0.0;
njac[i][4][0] = 0.0;
njac[i][0][1] = - con43 * c3c4 * tmp2 * u[k][j][i][1];
njac[i][1][1] = con43 * c3c4 * tmp1;
njac[i][2][1] = 0.0;
njac[i][3][1] = 0.0;
njac[i][4][1] = 0.0;
njac[i][0][2] = - c3c4 * tmp2 * u[k][j][i][2];
njac[i][1][2] = 0.0;
njac[i][2][2] = c3c4 * tmp1;
njac[i][3][2] = 0.0;
njac[i][4][2] = 0.0;
njac[i][0][3] = - c3c4 * tmp2 * u[k][j][i][3];
njac[i][1][3] = 0.0;
njac[i][2][3] = 0.0;
njac[i][3][3] = c3c4 * tmp1;
njac[i][4][3] = 0.0;
njac[i][0][4] = - ( con43 * c3c4
- c1345 ) * tmp3 * (u[k][j][i][1] * u[k][j][i][1])
- ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2] * u[k][j][i][2])
- ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][3] * u[k][j][i][3])
- c1345 * tmp2 * u[k][j][i][4];
njac[i][1][4] = ( con43 * c3c4
- c1345 ) * tmp2 * u[k][j][i][1];
njac[i][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
njac[i][3][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][3];
njac[i][4][4] = ( c1345 ) * tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in x direction
//---------------------------------------------------------------------
lhsinit(lhs, isize);
#pragma omp parallel for default(shared) private(i, tmp1, tmp2) firstprivate(isize, dt, tx1, tx2, dx1, dx2, dx3, dx4, dx5, fjac, njac)
for (i = 1; i <= isize - 1; i++)
{
tmp1 = dt * tx1;
tmp2 = dt * tx2;
lhs[i][AA][0][0] = - tmp2 * fjac[i - 1][0][0]
- tmp1 * njac[i - 1][0][0]
- tmp1 * dx1;
lhs[i][AA][1][0] = - tmp2 * fjac[i - 1][1][0]
- tmp1 * njac[i - 1][1][0];
lhs[i][AA][2][0] = - tmp2 * fjac[i - 1][2][0]
- tmp1 * njac[i - 1][2][0];
lhs[i][AA][3][0] = - tmp2 * fjac[i - 1][3][0]
- tmp1 * njac[i - 1][3][0];
lhs[i][AA][4][0] = - tmp2 * fjac[i - 1][4][0]
- tmp1 * njac[i - 1][4][0];
lhs[i][AA][0][1] = - tmp2 * fjac[i - 1][0][1]
- tmp1 * njac[i - 1][0][1];
lhs[i][AA][1][1] = - tmp2 * fjac[i - 1][1][1]
- tmp1 * njac[i - 1][1][1]
- tmp1 * dx2;
lhs[i][AA][2][1] = - tmp2 * fjac[i - 1][2][1]
- tmp1 * njac[i - 1][2][1];
lhs[i][AA][3][1] = - tmp2 * fjac[i - 1][3][1]
- tmp1 * njac[i - 1][3][1];
lhs[i][AA][4][1] = - tmp2 * fjac[i - 1][4][1]
- tmp1 * njac[i - 1][4][1];
lhs[i][AA][0][2] = - tmp2 * fjac[i - 1][0][2]
- tmp1 * njac[i - 1][0][2];
lhs[i][AA][1][2] = - tmp2 * fjac[i - 1][1][2]
- tmp1 * njac[i - 1][1][2];
lhs[i][AA][2][2] = - tmp2 * fjac[i - 1][2][2]
- tmp1 * njac[i - 1][2][2]
- tmp1 * dx3;
lhs[i][AA][3][2] = - tmp2 * fjac[i - 1][3][2]
- tmp1 * njac[i - 1][3][2];
lhs[i][AA][4][2] = - tmp2 * fjac[i - 1][4][2]
- tmp1 * njac[i - 1][4][2];
lhs[i][AA][0][3] = - tmp2 * fjac[i - 1][0][3]
- tmp1 * njac[i - 1][0][3];
lhs[i][AA][1][3] = - tmp2 * fjac[i - 1][1][3]
- tmp1 * njac[i - 1][1][3];
lhs[i][AA][2][3] = - tmp2 * fjac[i - 1][2][3]
- tmp1 * njac[i - 1][2][3];
lhs[i][AA][3][3] = - tmp2 * fjac[i - 1][3][3]
- tmp1 * njac[i - 1][3][3]
- tmp1 * dx4;
lhs[i][AA][4][3] = - tmp2 * fjac[i - 1][4][3]
- tmp1 * njac[i - 1][4][3];
lhs[i][AA][0][4] = - tmp2 * fjac[i - 1][0][4]
- tmp1 * njac[i - 1][0][4];
lhs[i][AA][1][4] = - tmp2 * fjac[i - 1][1][4]
- tmp1 * njac[i - 1][1][4];
lhs[i][AA][2][4] = - tmp2 * fjac[i - 1][2][4]
- tmp1 * njac[i - 1][2][4];
lhs[i][AA][3][4] = - tmp2 * fjac[i - 1][3][4]
- tmp1 * njac[i - 1][3][4];
lhs[i][AA][4][4] = - tmp2 * fjac[i - 1][4][4]
- tmp1 * njac[i - 1][4][4]
- tmp1 * dx5;
lhs[i][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][0][0]
+ tmp1 * 2.0 * dx1;
lhs[i][BB][1][0] = tmp1 * 2.0 * njac[i][1][0];
lhs[i][BB][2][0] = tmp1 * 2.0 * njac[i][2][0];
lhs[i][BB][3][0] = tmp1 * 2.0 * njac[i][3][0];
lhs[i][BB][4][0] = tmp1 * 2.0 * njac[i][4][0];
lhs[i][BB][0][1] = tmp1 * 2.0 * njac[i][0][1];
lhs[i][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][1][1]
+ tmp1 * 2.0 * dx2;
lhs[i][BB][2][1] = tmp1 * 2.0 * njac[i][2][1];
lhs[i][BB][3][1] = tmp1 * 2.0 * njac[i][3][1];
lhs[i][BB][4][1] = tmp1 * 2.0 * njac[i][4][1];
lhs[i][BB][0][2] = tmp1 * 2.0 * njac[i][0][2];
lhs[i][BB][1][2] = tmp1 * 2.0 * njac[i][1][2];
lhs[i][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][2][2]
+ tmp1 * 2.0 * dx3;
lhs[i][BB][3][2] = tmp1 * 2.0 * njac[i][3][2];
lhs[i][BB][4][2] = tmp1 * 2.0 * njac[i][4][2];
lhs[i][BB][0][3] = tmp1 * 2.0 * njac[i][0][3];
lhs[i][BB][1][3] = tmp1 * 2.0 * njac[i][1][3];
lhs[i][BB][2][3] = tmp1 * 2.0 * njac[i][2][3];
lhs[i][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][3][3]
+ tmp1 * 2.0 * dx4;
lhs[i][BB][4][3] = tmp1 * 2.0 * njac[i][4][3];
lhs[i][BB][0][4] = tmp1 * 2.0 * njac[i][0][4];
lhs[i][BB][1][4] = tmp1 * 2.0 * njac[i][1][4];
lhs[i][BB][2][4] = tmp1 * 2.0 * njac[i][2][4];
lhs[i][BB][3][4] = tmp1 * 2.0 * njac[i][3][4];
lhs[i][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][4][4]
+ tmp1 * 2.0 * dx5;
lhs[i][CC][0][0] = tmp2 * fjac[i + 1][0][0]
- tmp1 * njac[i + 1][0][0]
- tmp1 * dx1;
lhs[i][CC][1][0] = tmp2 * fjac[i + 1][1][0]
- tmp1 * njac[i + 1][1][0];
lhs[i][CC][2][0] = tmp2 * fjac[i + 1][2][0]
- tmp1 * njac[i + 1][2][0];
lhs[i][CC][3][0] = tmp2 * fjac[i + 1][3][0]
- tmp1 * njac[i + 1][3][0];
lhs[i][CC][4][0] = tmp2 * fjac[i + 1][4][0]
- tmp1 * njac[i + 1][4][0];
lhs[i][CC][0][1] = tmp2 * fjac[i + 1][0][1]
- tmp1 * njac[i + 1][0][1];
lhs[i][CC][1][1] = tmp2 * fjac[i + 1][1][1]
- tmp1 * njac[i + 1][1][1]
- tmp1 * dx2;
lhs[i][CC][2][1] = tmp2 * fjac[i + 1][2][1]
- tmp1 * njac[i + 1][2][1];
lhs[i][CC][3][1] = tmp2 * fjac[i + 1][3][1]
- tmp1 * njac[i + 1][3][1];
lhs[i][CC][4][1] = tmp2 * fjac[i + 1][4][1]
- tmp1 * njac[i + 1][4][1];
lhs[i][CC][0][2] = tmp2 * fjac[i + 1][0][2]
- tmp1 * njac[i + 1][0][2];
lhs[i][CC][1][2] = tmp2 * fjac[i + 1][1][2]
- tmp1 * njac[i + 1][1][2];
lhs[i][CC][2][2] = tmp2 * fjac[i + 1][2][2]
- tmp1 * njac[i + 1][2][2]
- tmp1 * dx3;
lhs[i][CC][3][2] = tmp2 * fjac[i + 1][3][2]
- tmp1 * njac[i + 1][3][2];
lhs[i][CC][4][2] = tmp2 * fjac[i + 1][4][2]
- tmp1 * njac[i + 1][4][2];
lhs[i][CC][0][3] = tmp2 * fjac[i + 1][0][3]
- tmp1 * njac[i + 1][0][3];
lhs[i][CC][1][3] = tmp2 * fjac[i + 1][1][3]
- tmp1 * njac[i + 1][1][3];
lhs[i][CC][2][3] = tmp2 * fjac[i + 1][2][3]
- tmp1 * njac[i + 1][2][3];
lhs[i][CC][3][3] = tmp2 * fjac[i + 1][3][3]
- tmp1 * njac[i + 1][3][3]
- tmp1 * dx4;
lhs[i][CC][4][3] = tmp2 * fjac[i + 1][4][3]
- tmp1 * njac[i + 1][4][3];
lhs[i][CC][0][4] = tmp2 * fjac[i + 1][0][4]
- tmp1 * njac[i + 1][0][4];
lhs[i][CC][1][4] = tmp2 * fjac[i + 1][1][4]
- tmp1 * njac[i + 1][1][4];
lhs[i][CC][2][4] = tmp2 * fjac[i + 1][2][4]
- tmp1 * njac[i + 1][2][4];
lhs[i][CC][3][4] = tmp2 * fjac[i + 1][3][4]
- tmp1 * njac[i + 1][3][4];
lhs[i][CC][4][4] = tmp2 * fjac[i + 1][4][4]
- tmp1 * njac[i + 1][4][4]
- tmp1 * dx5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs guaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(IMAX) and rhs'(IMAX) will be sent to next cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// outer most do loops - sweeping in i direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[k][j][0] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][j][0] );
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
for (i = 1; i <= isize - 1; i++)
{
//-------------------------------------------------------------------
// rhs(i) = rhs(i) - A*rhs(i-1)
//-------------------------------------------------------------------
matvec_sub(lhs[i][AA], rhs[k][j][i - 1], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(i) = B(i) - C(i-1)*A(i)
//-------------------------------------------------------------------
matmul_sub(lhs[i][AA], lhs[i - 1][CC], lhs[i][BB]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[k][j][0] by b_inverse[k][j][0] and copy to rhs
//-------------------------------------------------------------------
binvcrhs( lhs[i][BB], lhs[i][CC], rhs[k][j][i] );
}
//---------------------------------------------------------------------
// rhs(isize) = rhs(isize) - A*rhs(isize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[isize][AA], rhs[k][j][isize - 1], rhs[k][j][isize]);
//---------------------------------------------------------------------
// B(isize) = B(isize) - C(isize-1)*A(isize)
//---------------------------------------------------------------------
matmul_sub(lhs[isize][AA], lhs[isize - 1][CC], lhs[isize][BB]);
//---------------------------------------------------------------------
// multiply rhs() by b_inverse() and copy to rhs
//---------------------------------------------------------------------
binvrhs( lhs[isize][BB], rhs[k][j][isize] );
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(isize)=rhs(isize)
// else assume U(isize) is loaded in un pack backsub_info
// so just use it
// after u(istart) will be sent to next cell
//---------------------------------------------------------------------
for (i = isize - 1; i >= 0; i--)
{
for (m = 0; m < BLOCK_SIZE; m++)
{
for (n = 0; n < BLOCK_SIZE; n++)
{
rhs[k][j][i][m] = rhs[k][j][i][m]
- lhs[i][CC][n][m] * rhs[k][j][i + 1][n];
}
}
}
}
}
}
//---------------------------------------------------------------------
// Performs line solves in Y direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//---------------------------------------------------------------------
void y_solve()
{
// Per-line work arrays, one entry per grid point along the y pencil:
//   fjac - the "a" Jacobian (labeled f in the comments below), 5x5 per j
//   njac - the "n" Jacobian, 5x5 per j
//   lhs  - the three block-tridiagonal factors [AA|BB|CC], 5x5 each per j
int i, j, k, m, n, jsize;
double fjac[PROBLEM_SIZE + 1][5][5];
double njac[PROBLEM_SIZE + 1][5][5];
double lhs [PROBLEM_SIZE + 1][3][5][5];
double tmp1, tmp2, tmp3;
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// This function computes the left hand side for the three y-factors
//---------------------------------------------------------------------
jsize = grid_points[1] - 1;
//---------------------------------------------------------------------
// Compute the indices for storing the tri-diagonal matrix;
// determine a (labeled f) and n jacobians for cell c
//---------------------------------------------------------------------
for (k = 1; k <= grid_points[2] - 2; k++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
// Each j along the pencil is independent here, so the Jacobian
// evaluation parallelizes over j.
#pragma omp parallel for default(shared) private(j, tmp1, tmp2, tmp3) firstprivate(jsize, k, i, c2, c1, c3c4, con43, c1345, rho_i, u, qs, square)
for (j = 0; j <= jsize; j++)
{
// tmp1/tmp2/tmp3 = successive powers of rho_i[k][j][i]
// (presumably the cached reciprocal density 1/u[k][j][i][0],
// matching the 1.0/u[k][j][i][0] used by z_solve — TODO confirm)
tmp1 = rho_i[k][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[j][0][0] = 0.0;
fjac[j][1][0] = 0.0;
fjac[j][2][0] = 1.0;
fjac[j][3][0] = 0.0;
fjac[j][4][0] = 0.0;
fjac[j][0][1] = - ( u[k][j][i][1] * u[k][j][i][2] ) * tmp2;
fjac[j][1][1] = u[k][j][i][2] * tmp1;
fjac[j][2][1] = u[k][j][i][1] * tmp1;
fjac[j][3][1] = 0.0;
fjac[j][4][1] = 0.0;
fjac[j][0][2] = - ( u[k][j][i][2] * u[k][j][i][2] * tmp2)
+ c2 * qs[k][j][i];
fjac[j][1][2] = - c2 * u[k][j][i][1] * tmp1;
fjac[j][2][2] = ( 2.0 - c2 ) * u[k][j][i][2] * tmp1;
fjac[j][3][2] = - c2 * u[k][j][i][3] * tmp1;
fjac[j][4][2] = c2;
fjac[j][0][3] = - ( u[k][j][i][2] * u[k][j][i][3] ) * tmp2;
fjac[j][1][3] = 0.0;
fjac[j][2][3] = u[k][j][i][3] * tmp1;
fjac[j][3][3] = u[k][j][i][2] * tmp1;
fjac[j][4][3] = 0.0;
fjac[j][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
* u[k][j][i][2] * tmp2;
fjac[j][1][4] = - c2 * u[k][j][i][1] * u[k][j][i][2] * tmp2;
fjac[j][2][4] = c1 * u[k][j][i][4] * tmp1
- c2 * ( qs[k][j][i] + u[k][j][i][2] * u[k][j][i][2] * tmp2 );
fjac[j][3][4] = - c2 * ( u[k][j][i][2] * u[k][j][i][3] ) * tmp2;
fjac[j][4][4] = c1 * u[k][j][i][2] * tmp1;
// njac: note the con43 factor attaches to the y-momentum (index 2)
// entries here, whereas x_solve puts it on index 1 and z_solve on
// index 3 — the usual axis permutation for this solver.
njac[j][0][0] = 0.0;
njac[j][1][0] = 0.0;
njac[j][2][0] = 0.0;
njac[j][3][0] = 0.0;
njac[j][4][0] = 0.0;
njac[j][0][1] = - c3c4 * tmp2 * u[k][j][i][1];
njac[j][1][1] = c3c4 * tmp1;
njac[j][2][1] = 0.0;
njac[j][3][1] = 0.0;
njac[j][4][1] = 0.0;
njac[j][0][2] = - con43 * c3c4 * tmp2 * u[k][j][i][2];
njac[j][1][2] = 0.0;
njac[j][2][2] = con43 * c3c4 * tmp1;
njac[j][3][2] = 0.0;
njac[j][4][2] = 0.0;
njac[j][0][3] = - c3c4 * tmp2 * u[k][j][i][3];
njac[j][1][3] = 0.0;
njac[j][2][3] = 0.0;
njac[j][3][3] = c3c4 * tmp1;
njac[j][4][3] = 0.0;
njac[j][0][4] = - ( c3c4
- c1345 ) * tmp3 * (u[k][j][i][1] * u[k][j][i][1])
- ( con43 * c3c4
- c1345 ) * tmp3 * (u[k][j][i][2] * u[k][j][i][2])
- ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][3] * u[k][j][i][3])
- c1345 * tmp2 * u[k][j][i][4];
njac[j][1][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][1];
njac[j][2][4] = ( con43 * c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
njac[j][3][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][3];
njac[j][4][4] = ( c1345 ) * tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in y direction
//---------------------------------------------------------------------
lhsinit(lhs, jsize);
// NOTE(review): fjac and njac are only read inside this loop, yet
// firstprivate copies both large arrays into every thread; shared
// access would presumably suffice — confirm before changing.
#pragma omp parallel for default(shared) private(j, tmp1, tmp2) firstprivate(jsize, dt, ty1, ty2, dy1, dy2, dy3, dy4, dy5, fjac, njac)
for (j = 1; j <= jsize - 1; j++)
{
tmp1 = dt * ty1;
tmp2 = dt * ty2;
// AA: sub-diagonal block, built from the (j-1) Jacobians; the
// dy1..dy5 terms are added only on the matrix diagonal entries.
lhs[j][AA][0][0] = - tmp2 * fjac[j - 1][0][0]
- tmp1 * njac[j - 1][0][0]
- tmp1 * dy1;
lhs[j][AA][1][0] = - tmp2 * fjac[j - 1][1][0]
- tmp1 * njac[j - 1][1][0];
lhs[j][AA][2][0] = - tmp2 * fjac[j - 1][2][0]
- tmp1 * njac[j - 1][2][0];
lhs[j][AA][3][0] = - tmp2 * fjac[j - 1][3][0]
- tmp1 * njac[j - 1][3][0];
lhs[j][AA][4][0] = - tmp2 * fjac[j - 1][4][0]
- tmp1 * njac[j - 1][4][0];
lhs[j][AA][0][1] = - tmp2 * fjac[j - 1][0][1]
- tmp1 * njac[j - 1][0][1];
lhs[j][AA][1][1] = - tmp2 * fjac[j - 1][1][1]
- tmp1 * njac[j - 1][1][1]
- tmp1 * dy2;
lhs[j][AA][2][1] = - tmp2 * fjac[j - 1][2][1]
- tmp1 * njac[j - 1][2][1];
lhs[j][AA][3][1] = - tmp2 * fjac[j - 1][3][1]
- tmp1 * njac[j - 1][3][1];
lhs[j][AA][4][1] = - tmp2 * fjac[j - 1][4][1]
- tmp1 * njac[j - 1][4][1];
lhs[j][AA][0][2] = - tmp2 * fjac[j - 1][0][2]
- tmp1 * njac[j - 1][0][2];
lhs[j][AA][1][2] = - tmp2 * fjac[j - 1][1][2]
- tmp1 * njac[j - 1][1][2];
lhs[j][AA][2][2] = - tmp2 * fjac[j - 1][2][2]
- tmp1 * njac[j - 1][2][2]
- tmp1 * dy3;
lhs[j][AA][3][2] = - tmp2 * fjac[j - 1][3][2]
- tmp1 * njac[j - 1][3][2];
lhs[j][AA][4][2] = - tmp2 * fjac[j - 1][4][2]
- tmp1 * njac[j - 1][4][2];
lhs[j][AA][0][3] = - tmp2 * fjac[j - 1][0][3]
- tmp1 * njac[j - 1][0][3];
lhs[j][AA][1][3] = - tmp2 * fjac[j - 1][1][3]
- tmp1 * njac[j - 1][1][3];
lhs[j][AA][2][3] = - tmp2 * fjac[j - 1][2][3]
- tmp1 * njac[j - 1][2][3];
lhs[j][AA][3][3] = - tmp2 * fjac[j - 1][3][3]
- tmp1 * njac[j - 1][3][3]
- tmp1 * dy4;
lhs[j][AA][4][3] = - tmp2 * fjac[j - 1][4][3]
- tmp1 * njac[j - 1][4][3];
lhs[j][AA][0][4] = - tmp2 * fjac[j - 1][0][4]
- tmp1 * njac[j - 1][0][4];
lhs[j][AA][1][4] = - tmp2 * fjac[j - 1][1][4]
- tmp1 * njac[j - 1][1][4];
lhs[j][AA][2][4] = - tmp2 * fjac[j - 1][2][4]
- tmp1 * njac[j - 1][2][4];
lhs[j][AA][3][4] = - tmp2 * fjac[j - 1][3][4]
- tmp1 * njac[j - 1][3][4];
lhs[j][AA][4][4] = - tmp2 * fjac[j - 1][4][4]
- tmp1 * njac[j - 1][4][4]
- tmp1 * dy5;
// BB: diagonal block = identity + 2*tmp1*njac(j), with 2*tmp1*dy*
// added on the diagonal entries only.
lhs[j][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[j][0][0]
+ tmp1 * 2.0 * dy1;
lhs[j][BB][1][0] = tmp1 * 2.0 * njac[j][1][0];
lhs[j][BB][2][0] = tmp1 * 2.0 * njac[j][2][0];
lhs[j][BB][3][0] = tmp1 * 2.0 * njac[j][3][0];
lhs[j][BB][4][0] = tmp1 * 2.0 * njac[j][4][0];
lhs[j][BB][0][1] = tmp1 * 2.0 * njac[j][0][1];
lhs[j][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[j][1][1]
+ tmp1 * 2.0 * dy2;
lhs[j][BB][2][1] = tmp1 * 2.0 * njac[j][2][1];
lhs[j][BB][3][1] = tmp1 * 2.0 * njac[j][3][1];
lhs[j][BB][4][1] = tmp1 * 2.0 * njac[j][4][1];
lhs[j][BB][0][2] = tmp1 * 2.0 * njac[j][0][2];
lhs[j][BB][1][2] = tmp1 * 2.0 * njac[j][1][2];
lhs[j][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[j][2][2]
+ tmp1 * 2.0 * dy3;
lhs[j][BB][3][2] = tmp1 * 2.0 * njac[j][3][2];
lhs[j][BB][4][2] = tmp1 * 2.0 * njac[j][4][2];
lhs[j][BB][0][3] = tmp1 * 2.0 * njac[j][0][3];
lhs[j][BB][1][3] = tmp1 * 2.0 * njac[j][1][3];
lhs[j][BB][2][3] = tmp1 * 2.0 * njac[j][2][3];
lhs[j][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[j][3][3]
+ tmp1 * 2.0 * dy4;
lhs[j][BB][4][3] = tmp1 * 2.0 * njac[j][4][3];
lhs[j][BB][0][4] = tmp1 * 2.0 * njac[j][0][4];
lhs[j][BB][1][4] = tmp1 * 2.0 * njac[j][1][4];
lhs[j][BB][2][4] = tmp1 * 2.0 * njac[j][2][4];
lhs[j][BB][3][4] = tmp1 * 2.0 * njac[j][3][4];
lhs[j][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[j][4][4]
+ tmp1 * 2.0 * dy5;
// CC: super-diagonal block, built from the (j+1) Jacobians; note
// the fjac term enters with the opposite sign to the AA block.
lhs[j][CC][0][0] = tmp2 * fjac[j + 1][0][0]
- tmp1 * njac[j + 1][0][0]
- tmp1 * dy1;
lhs[j][CC][1][0] = tmp2 * fjac[j + 1][1][0]
- tmp1 * njac[j + 1][1][0];
lhs[j][CC][2][0] = tmp2 * fjac[j + 1][2][0]
- tmp1 * njac[j + 1][2][0];
lhs[j][CC][3][0] = tmp2 * fjac[j + 1][3][0]
- tmp1 * njac[j + 1][3][0];
lhs[j][CC][4][0] = tmp2 * fjac[j + 1][4][0]
- tmp1 * njac[j + 1][4][0];
lhs[j][CC][0][1] = tmp2 * fjac[j + 1][0][1]
- tmp1 * njac[j + 1][0][1];
lhs[j][CC][1][1] = tmp2 * fjac[j + 1][1][1]
- tmp1 * njac[j + 1][1][1]
- tmp1 * dy2;
lhs[j][CC][2][1] = tmp2 * fjac[j + 1][2][1]
- tmp1 * njac[j + 1][2][1];
lhs[j][CC][3][1] = tmp2 * fjac[j + 1][3][1]
- tmp1 * njac[j + 1][3][1];
lhs[j][CC][4][1] = tmp2 * fjac[j + 1][4][1]
- tmp1 * njac[j + 1][4][1];
lhs[j][CC][0][2] = tmp2 * fjac[j + 1][0][2]
- tmp1 * njac[j + 1][0][2];
lhs[j][CC][1][2] = tmp2 * fjac[j + 1][1][2]
- tmp1 * njac[j + 1][1][2];
lhs[j][CC][2][2] = tmp2 * fjac[j + 1][2][2]
- tmp1 * njac[j + 1][2][2]
- tmp1 * dy3;
lhs[j][CC][3][2] = tmp2 * fjac[j + 1][3][2]
- tmp1 * njac[j + 1][3][2];
lhs[j][CC][4][2] = tmp2 * fjac[j + 1][4][2]
- tmp1 * njac[j + 1][4][2];
lhs[j][CC][0][3] = tmp2 * fjac[j + 1][0][3]
- tmp1 * njac[j + 1][0][3];
lhs[j][CC][1][3] = tmp2 * fjac[j + 1][1][3]
- tmp1 * njac[j + 1][1][3];
lhs[j][CC][2][3] = tmp2 * fjac[j + 1][2][3]
- tmp1 * njac[j + 1][2][3];
lhs[j][CC][3][3] = tmp2 * fjac[j + 1][3][3]
- tmp1 * njac[j + 1][3][3]
- tmp1 * dy4;
lhs[j][CC][4][3] = tmp2 * fjac[j + 1][4][3]
- tmp1 * njac[j + 1][4][3];
lhs[j][CC][0][4] = tmp2 * fjac[j + 1][0][4]
- tmp1 * njac[j + 1][0][4];
lhs[j][CC][1][4] = tmp2 * fjac[j + 1][1][4]
- tmp1 * njac[j + 1][1][4];
lhs[j][CC][2][4] = tmp2 * fjac[j + 1][2][4]
- tmp1 * njac[j + 1][2][4];
lhs[j][CC][3][4] = tmp2 * fjac[j + 1][3][4]
- tmp1 * njac[j + 1][3][4];
lhs[j][CC][4][4] = tmp2 * fjac[j + 1][4][4]
- tmp1 * njac[j + 1][4][4]
- tmp1 * dy5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs Gaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(JMAX) and rhs'(JMAX) will be sent to next cell
//
// NOTE: the forward sweep below is sequential in j (each step
// depends on j-1), so it is deliberately not parallelized.
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[k][0][i] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][0][i] );
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
for (j = 1; j <= jsize - 1; j++)
{
//-------------------------------------------------------------------
// subtract A*lhs_vector(j-1) from lhs_vector(j)
//
// rhs(j) = rhs(j) - A*rhs(j-1)
//-------------------------------------------------------------------
matvec_sub(lhs[j][AA], rhs[k][j - 1][i], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(j) = B(j) - C(j-1)*A(j)
//-------------------------------------------------------------------
matmul_sub(lhs[j][AA], lhs[j - 1][CC], lhs[j][BB]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[k][0][i] by b_inverse[k][0][i] and copy to rhs
//-------------------------------------------------------------------
binvcrhs( lhs[j][BB], lhs[j][CC], rhs[k][j][i] );
}
//---------------------------------------------------------------------
// rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[jsize][AA], rhs[k][jsize - 1][i], rhs[k][jsize][i]);
//---------------------------------------------------------------------
// B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
// matmul_sub(AA,i,jsize,k,c,
// $ CC,i,jsize-1,k,c,BB,i,jsize,k)
//---------------------------------------------------------------------
matmul_sub(lhs[jsize][AA], lhs[jsize - 1][CC], lhs[jsize][BB]);
//---------------------------------------------------------------------
// multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
// (last row has no C block, hence binvrhs rather than binvcrhs)
//---------------------------------------------------------------------
binvrhs( lhs[jsize][BB], rhs[k][jsize][i] );
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(jsize)=rhs(jsize)
// else assume U(jsize) is loaded in un pack backsub_info
// so just use it
// after u(jstart) will be sent to next cell
//---------------------------------------------------------------------
for (j = jsize - 1; j >= 0; j--)
{
for (m = 0; m < BLOCK_SIZE; m++)
{
for (n = 0; n < BLOCK_SIZE; n++)
{
rhs[k][j][i][m] = rhs[k][j][i][m]
- lhs[j][CC][n][m] * rhs[k][j + 1][i][n];
}
}
}
}
}
}
//---------------------------------------------------------------------
// Performs line solves in Z direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//---------------------------------------------------------------------
void z_solve()
{
int i, j, k, m, n, ksize;
double fjac[PROBLEM_SIZE + 1][5][5];
double njac[PROBLEM_SIZE + 1][5][5];
double lhs [PROBLEM_SIZE + 1][3][5][5];
double tmp1, tmp2, tmp3;
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// This function computes the left hand side for the three z-factors
//---------------------------------------------------------------------
ksize = grid_points[2] - 1;
//---------------------------------------------------------------------
// Compute the indices for storing the block-diagonal matrix;
// determine c (labeled f) and s jacobians
//---------------------------------------------------------------------
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
#pragma omp parallel for default(shared) private(k, tmp1, tmp2, tmp3) firstprivate(ksize, j, i, c2, c1, c3c4, con43, c3, c4, c1345, u, qs, square)
for (k = 0; k <= ksize; k++)
{
tmp1 = 1.0 / u[k][j][i][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[k][0][0] = 0.0;
fjac[k][1][0] = 0.0;
fjac[k][2][0] = 0.0;
fjac[k][3][0] = 1.0;
fjac[k][4][0] = 0.0;
fjac[k][0][1] = - ( u[k][j][i][1] * u[k][j][i][3] ) * tmp2;
fjac[k][1][1] = u[k][j][i][3] * tmp1;
fjac[k][2][1] = 0.0;
fjac[k][3][1] = u[k][j][i][1] * tmp1;
fjac[k][4][1] = 0.0;
fjac[k][0][2] = - ( u[k][j][i][2] * u[k][j][i][3] ) * tmp2;
fjac[k][1][2] = 0.0;
fjac[k][2][2] = u[k][j][i][3] * tmp1;
fjac[k][3][2] = u[k][j][i][2] * tmp1;
fjac[k][4][2] = 0.0;
fjac[k][0][3] = - (u[k][j][i][3] * u[k][j][i][3] * tmp2 )
+ c2 * qs[k][j][i];
fjac[k][1][3] = - c2 * u[k][j][i][1] * tmp1;
fjac[k][2][3] = - c2 * u[k][j][i][2] * tmp1;
fjac[k][3][3] = ( 2.0 - c2 ) * u[k][j][i][3] * tmp1;
fjac[k][4][3] = c2;
fjac[k][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
* u[k][j][i][3] * tmp2;
fjac[k][1][4] = - c2 * ( u[k][j][i][1] * u[k][j][i][3] ) * tmp2;
fjac[k][2][4] = - c2 * ( u[k][j][i][2] * u[k][j][i][3] ) * tmp2;
fjac[k][3][4] = c1 * ( u[k][j][i][4] * tmp1 )
- c2 * ( qs[k][j][i] + u[k][j][i][3] * u[k][j][i][3] * tmp2 );
fjac[k][4][4] = c1 * u[k][j][i][3] * tmp1;
njac[k][0][0] = 0.0;
njac[k][1][0] = 0.0;
njac[k][2][0] = 0.0;
njac[k][3][0] = 0.0;
njac[k][4][0] = 0.0;
njac[k][0][1] = - c3c4 * tmp2 * u[k][j][i][1];
njac[k][1][1] = c3c4 * tmp1;
njac[k][2][1] = 0.0;
njac[k][3][1] = 0.0;
njac[k][4][1] = 0.0;
njac[k][0][2] = - c3c4 * tmp2 * u[k][j][i][2];
njac[k][1][2] = 0.0;
njac[k][2][2] = c3c4 * tmp1;
njac[k][3][2] = 0.0;
njac[k][4][2] = 0.0;
njac[k][0][3] = - con43 * c3c4 * tmp2 * u[k][j][i][3];
njac[k][1][3] = 0.0;
njac[k][2][3] = 0.0;
njac[k][3][3] = con43 * c3 * c4 * tmp1;
njac[k][4][3] = 0.0;
njac[k][0][4] = - ( c3c4
- c1345 ) * tmp3 * (u[k][j][i][1] * u[k][j][i][1])
- ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2] * u[k][j][i][2])
- ( con43 * c3c4
- c1345 ) * tmp3 * (u[k][j][i][3] * u[k][j][i][3])
- c1345 * tmp2 * u[k][j][i][4];
njac[k][1][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][1];
njac[k][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
njac[k][3][4] = ( con43 * c3c4
- c1345 ) * tmp2 * u[k][j][i][3];
njac[k][4][4] = ( c1345 ) * tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in z direction
//---------------------------------------------------------------------
lhsinit(lhs, ksize);
#pragma omp parallel for default(shared) private(k, tmp1, tmp2) firstprivate(ksize, dt, tz1, tz2, dz1, dz2, dz3, dz4, dz5, fjac, njac)
for (k = 1; k <= ksize - 1; k++)
{
tmp1 = dt * tz1;
tmp2 = dt * tz2;
lhs[k][AA][0][0] = - tmp2 * fjac[k - 1][0][0]
- tmp1 * njac[k - 1][0][0]
- tmp1 * dz1;
lhs[k][AA][1][0] = - tmp2 * fjac[k - 1][1][0]
- tmp1 * njac[k - 1][1][0];
lhs[k][AA][2][0] = - tmp2 * fjac[k - 1][2][0]
- tmp1 * njac[k - 1][2][0];
lhs[k][AA][3][0] = - tmp2 * fjac[k - 1][3][0]
- tmp1 * njac[k - 1][3][0];
lhs[k][AA][4][0] = - tmp2 * fjac[k - 1][4][0]
- tmp1 * njac[k - 1][4][0];
lhs[k][AA][0][1] = - tmp2 * fjac[k - 1][0][1]
- tmp1 * njac[k - 1][0][1];
lhs[k][AA][1][1] = - tmp2 * fjac[k - 1][1][1]
- tmp1 * njac[k - 1][1][1]
- tmp1 * dz2;
lhs[k][AA][2][1] = - tmp2 * fjac[k - 1][2][1]
- tmp1 * njac[k - 1][2][1];
lhs[k][AA][3][1] = - tmp2 * fjac[k - 1][3][1]
- tmp1 * njac[k - 1][3][1];
lhs[k][AA][4][1] = - tmp2 * fjac[k - 1][4][1]
- tmp1 * njac[k - 1][4][1];
lhs[k][AA][0][2] = - tmp2 * fjac[k - 1][0][2]
- tmp1 * njac[k - 1][0][2];
lhs[k][AA][1][2] = - tmp2 * fjac[k - 1][1][2]
- tmp1 * njac[k - 1][1][2];
lhs[k][AA][2][2] = - tmp2 * fjac[k - 1][2][2]
- tmp1 * njac[k - 1][2][2]
- tmp1 * dz3;
lhs[k][AA][3][2] = - tmp2 * fjac[k - 1][3][2]
- tmp1 * njac[k - 1][3][2];
lhs[k][AA][4][2] = - tmp2 * fjac[k - 1][4][2]
- tmp1 * njac[k - 1][4][2];
lhs[k][AA][0][3] = - tmp2 * fjac[k - 1][0][3]
- tmp1 * njac[k - 1][0][3];
lhs[k][AA][1][3] = - tmp2 * fjac[k - 1][1][3]
- tmp1 * njac[k - 1][1][3];
lhs[k][AA][2][3] = - tmp2 * fjac[k - 1][2][3]
- tmp1 * njac[k - 1][2][3];
lhs[k][AA][3][3] = - tmp2 * fjac[k - 1][3][3]
- tmp1 * njac[k - 1][3][3]
- tmp1 * dz4;
lhs[k][AA][4][3] = - tmp2 * fjac[k - 1][4][3]
- tmp1 * njac[k - 1][4][3];
lhs[k][AA][0][4] = - tmp2 * fjac[k - 1][0][4]
- tmp1 * njac[k - 1][0][4];
lhs[k][AA][1][4] = - tmp2 * fjac[k - 1][1][4]
- tmp1 * njac[k - 1][1][4];
lhs[k][AA][2][4] = - tmp2 * fjac[k - 1][2][4]
- tmp1 * njac[k - 1][2][4];
lhs[k][AA][3][4] = - tmp2 * fjac[k - 1][3][4]
- tmp1 * njac[k - 1][3][4];
lhs[k][AA][4][4] = - tmp2 * fjac[k - 1][4][4]
- tmp1 * njac[k - 1][4][4]
- tmp1 * dz5;
lhs[k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[k][0][0]
+ tmp1 * 2.0 * dz1;
lhs[k][BB][1][0] = tmp1 * 2.0 * njac[k][1][0];
lhs[k][BB][2][0] = tmp1 * 2.0 * njac[k][2][0];
lhs[k][BB][3][0] = tmp1 * 2.0 * njac[k][3][0];
lhs[k][BB][4][0] = tmp1 * 2.0 * njac[k][4][0];
lhs[k][BB][0][1] = tmp1 * 2.0 * njac[k][0][1];
lhs[k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[k][1][1]
+ tmp1 * 2.0 * dz2;
lhs[k][BB][2][1] = tmp1 * 2.0 * njac[k][2][1];
lhs[k][BB][3][1] = tmp1 * 2.0 * njac[k][3][1];
lhs[k][BB][4][1] = tmp1 * 2.0 * njac[k][4][1];
lhs[k][BB][0][2] = tmp1 * 2.0 * njac[k][0][2];
lhs[k][BB][1][2] = tmp1 * 2.0 * njac[k][1][2];
lhs[k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[k][2][2]
+ tmp1 * 2.0 * dz3;
lhs[k][BB][3][2] = tmp1 * 2.0 * njac[k][3][2];
lhs[k][BB][4][2] = tmp1 * 2.0 * njac[k][4][2];
lhs[k][BB][0][3] = tmp1 * 2.0 * njac[k][0][3];
lhs[k][BB][1][3] = tmp1 * 2.0 * njac[k][1][3];
lhs[k][BB][2][3] = tmp1 * 2.0 * njac[k][2][3];
lhs[k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[k][3][3]
+ tmp1 * 2.0 * dz4;
lhs[k][BB][4][3] = tmp1 * 2.0 * njac[k][4][3];
lhs[k][BB][0][4] = tmp1 * 2.0 * njac[k][0][4];
lhs[k][BB][1][4] = tmp1 * 2.0 * njac[k][1][4];
lhs[k][BB][2][4] = tmp1 * 2.0 * njac[k][2][4];
lhs[k][BB][3][4] = tmp1 * 2.0 * njac[k][3][4];
lhs[k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[k][4][4]
+ tmp1 * 2.0 * dz5;
lhs[k][CC][0][0] = tmp2 * fjac[k + 1][0][0]
- tmp1 * njac[k + 1][0][0]
- tmp1 * dz1;
lhs[k][CC][1][0] = tmp2 * fjac[k + 1][1][0]
- tmp1 * njac[k + 1][1][0];
lhs[k][CC][2][0] = tmp2 * fjac[k + 1][2][0]
- tmp1 * njac[k + 1][2][0];
lhs[k][CC][3][0] = tmp2 * fjac[k + 1][3][0]
- tmp1 * njac[k + 1][3][0];
lhs[k][CC][4][0] = tmp2 * fjac[k + 1][4][0]
- tmp1 * njac[k + 1][4][0];
lhs[k][CC][0][1] = tmp2 * fjac[k + 1][0][1]
- tmp1 * njac[k + 1][0][1];
lhs[k][CC][1][1] = tmp2 * fjac[k + 1][1][1]
- tmp1 * njac[k + 1][1][1]
- tmp1 * dz2;
lhs[k][CC][2][1] = tmp2 * fjac[k + 1][2][1]
- tmp1 * njac[k + 1][2][1];
lhs[k][CC][3][1] = tmp2 * fjac[k + 1][3][1]
- tmp1 * njac[k + 1][3][1];
lhs[k][CC][4][1] = tmp2 * fjac[k + 1][4][1]
- tmp1 * njac[k + 1][4][1];
lhs[k][CC][0][2] = tmp2 * fjac[k + 1][0][2]
- tmp1 * njac[k + 1][0][2];
lhs[k][CC][1][2] = tmp2 * fjac[k + 1][1][2]
- tmp1 * njac[k + 1][1][2];
lhs[k][CC][2][2] = tmp2 * fjac[k + 1][2][2]
- tmp1 * njac[k + 1][2][2]
- tmp1 * dz3;
lhs[k][CC][3][2] = tmp2 * fjac[k + 1][3][2]
- tmp1 * njac[k + 1][3][2];
lhs[k][CC][4][2] = tmp2 * fjac[k + 1][4][2]
- tmp1 * njac[k + 1][4][2];
lhs[k][CC][0][3] = tmp2 * fjac[k + 1][0][3]
- tmp1 * njac[k + 1][0][3];
lhs[k][CC][1][3] = tmp2 * fjac[k + 1][1][3]
- tmp1 * njac[k + 1][1][3];
lhs[k][CC][2][3] = tmp2 * fjac[k + 1][2][3]
- tmp1 * njac[k + 1][2][3];
lhs[k][CC][3][3] = tmp2 * fjac[k + 1][3][3]
- tmp1 * njac[k + 1][3][3]
- tmp1 * dz4;
lhs[k][CC][4][3] = tmp2 * fjac[k + 1][4][3]
- tmp1 * njac[k + 1][4][3];
lhs[k][CC][0][4] = tmp2 * fjac[k + 1][0][4]
- tmp1 * njac[k + 1][0][4];
lhs[k][CC][1][4] = tmp2 * fjac[k + 1][1][4]
- tmp1 * njac[k + 1][1][4];
lhs[k][CC][2][4] = tmp2 * fjac[k + 1][2][4]
- tmp1 * njac[k + 1][2][4];
lhs[k][CC][3][4] = tmp2 * fjac[k + 1][3][4]
- tmp1 * njac[k + 1][3][4];
lhs[k][CC][4][4] = tmp2 * fjac[k + 1][4][4]
- tmp1 * njac[k + 1][4][4]
- tmp1 * dz5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs guaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(KMAX) and rhs'(KMAX) will be sent to next cell.
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// outer most do loops - sweeping in i direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[0][j][i] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs( lhs[0][BB], lhs[0][CC], rhs[0][j][i] );
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
for (k = 1; k <= ksize - 1; k++)
{
//-------------------------------------------------------------------
// subtract A*lhs_vector(k-1) from lhs_vector(k)
//
// rhs(k) = rhs(k) - A*rhs(k-1)
//-------------------------------------------------------------------
matvec_sub(lhs[k][AA], rhs[k - 1][j][i], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(k) = B(k) - C(k-1)*A(k)
// matmul_sub(AA,i,j,k,c,CC,i,j,k-1,c,BB,i,j,k)
//-------------------------------------------------------------------
matmul_sub(lhs[k][AA], lhs[k - 1][CC], lhs[k][BB]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[0][j][i] by b_inverse[0][j][i] and copy to rhs
//-------------------------------------------------------------------
binvcrhs( lhs[k][BB], lhs[k][CC], rhs[k][j][i] );
}
//---------------------------------------------------------------------
// Now finish up special cases for last cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[ksize][AA], rhs[ksize - 1][j][i], rhs[ksize][j][i]);
//---------------------------------------------------------------------
// B(ksize) = B(ksize) - C(ksize-1)*A(ksize)
// matmul_sub(AA,i,j,ksize,c,
// $ CC,i,j,ksize-1,c,BB,i,j,ksize)
//---------------------------------------------------------------------
matmul_sub(lhs[ksize][AA], lhs[ksize - 1][CC], lhs[ksize][BB]);
//---------------------------------------------------------------------
// multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
//---------------------------------------------------------------------
binvrhs( lhs[ksize][BB], rhs[ksize][j][i] );
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(ksize)=rhs(ksize)
// else assume U(ksize) is loaded in un pack backsub_info
// so just use it
// after u(kstart) will be sent to next cell
//---------------------------------------------------------------------
for (k = ksize - 1; k >= 0; k--)
{
for (m = 0; m < BLOCK_SIZE; m++)
{
for (n = 0; n < BLOCK_SIZE; n++)
{
rhs[k][j][i][m] = rhs[k][j][i][m]
- lhs[k][CC][n][m] * rhs[k + 1][j][i][n];
}
}
}
}
}
}
/*
 * Print the standard NPB benchmark result summary.
 *
 * name     benchmark name (e.g. "BT"); a name starting "EP" selects the
 *          2^n1 problem-size format
 * class    problem class letter (parameter keeps the historical name even
 *          though it is a C++ keyword; this is a C translation unit)
 * n1,n2,n3 grid dimensions; n2 == n3 == 0 means a non-grid benchmark
 *          (EP, FT, CG) where n1 alone encodes the problem size
 * niter    number of iterations executed
 * t        elapsed wall-clock time in seconds
 * mops     measured rate in million operations per second
 * optype   textual description of the operation measured
 * verified nonzero if the computed result passed verification
 */
void print_results(char *name, char class, int n1, int n2, int n3, int niter,
    double t, double mops, char *optype, int verified)
{
    char size[16];
    int j;
    printf( "\n\n %s Benchmark Completed.\n", name );
    printf( " Class = %12c\n", class );
    // If this is not a grid-based problem (EP, FT, CG), then
    // we only print n1, which contains some measure of the
    // problem size. In that case, n2 and n3 are both zero.
    // Otherwise, we print the grid size n1xn2xn3
    if ( ( n2 == 0 ) && ( n3 == 0 ) )
    {
        if ( ( name[0] == 'E' ) && ( name[1] == 'P' ) )
        {
            // BUGFIX: sprintf could overflow the 16-byte buffer when
            // 2^n1 needs more than 15 digits; snprintf truncates safely.
            snprintf( size, sizeof(size), "%15.0lf", pow(2.0, n1) );
            // Strip a trailing '.' left by older "%15.0f"-style formats
            // (kept for fidelity with the reference implementation).
            j = 14;
            if ( size[j] == '.' )
            {
                size[j] = ' ';
                j--;
            }
            size[j + 1] = '\0';
            printf( " Size = %15s\n", size );
        }
        else
        {
            printf( " Size = %12d\n", n1 );
        }
    }
    else
    {
        printf( " Size = %4dx%4dx%4d\n", n1, n2, n3 );
    }
    printf( " Iterations = %12d\n", niter );
    printf( " Time in seconds = %12.2lf\n", t );
    printf( " Mop/s total = %15.2lf\n", mops );
    printf( " Operation type = %24s\n", optype );
    if ( verified )
        printf( " Verification = %12s\n", "SUCCESSFUL" );
    else
        printf( " Verification = %12s\n", "UNSUCCESSFUL" );
}
/* Store into *t the wall-clock time, in seconds, measured relative to
 * the first call of this function (so values stay small and keep full
 * double precision). */
void wtime(double *t)
{
    static int sec = -1;
    struct timeval now;

    gettimeofday(&now, (struct timezone *)0);
    /* Latch the epoch on the very first call. */
    if (sec < 0)
    {
        sec = now.tv_sec;
    }
    *t = (double)(now.tv_sec - sec) + 1.0e-6 * (double)now.tv_usec;
}
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
/* Return the current relative wall-clock time in seconds (see wtime). */
double elapsed_time( void )
{
    double now = 0.0;
    wtime( &now );
    return now;
}
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
/* Reset the accumulated elapsed time of timer slot n to zero. */
void timer_clear( int n )
{
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
/* Record the current time as the start point of timer slot n. */
void timer_start( int n )
{
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
/* Stop timer slot n: add the interval since the matching timer_start(n)
 * to its accumulated total. */
void timer_stop( int n )
{
    elapsed[n] += elapsed_time() - start[n];
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
/* Return the total elapsed time accumulated by timer slot n. */
double timer_read( int n )
{
return ( elapsed[n] );
}
|
GB_unaryop__ainv_int16_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int16_uint64
// op(A') function: GB_tran__ainv_int16_uint64
// C type: int16_t
// A type: uint64_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = -aij
// scalar type of the input matrix A
#define GB_ATYPE \
uint64_t
// scalar type of the output matrix C
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// Cx [p]: the p-th entry of the output value array
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = -(int16_t) aij to each of the anz entries of the dense value
// array Ax, writing the results into Cx (Cx and Ax are the same length).
// Parallelized with a static OpenMP schedule over nthreads threads.
// Returns GrB_NO_VALUE if this operator was disabled at compile time
// (see GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop__ainv_int16_uint64
(
int16_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = -(int16_t) Ax [p], via the generated macros
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint64_t -> int16_t, and apply
// the AINV (negate) operator. The actual loop lives in the shared template
// GB_unaryop_transpose.c, instantiated here for phase 2 via GB_PHASE_2_OF_2.
// Returns GrB_NO_VALUE if this operator was disabled at compile time.
GrB_Info GB_tran__ainv_int16_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bias_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: chh@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
/* Reference fp32 bias kernel: out[c][y][x] = in[c][y][x] + bias[c].
 * Reads dims[1]/dims[2]/dims[3] of the input tensor as C/H/W
 * (NOTE(review): batch dim dims[0] is not iterated — presumably N==1;
 * confirm against callers). Channels are processed in parallel with
 * OpenMP. Always returns 0. */
int ref_bias_fp32(struct tensor* input_tensor, struct tensor* output_tensor, struct tensor* bias_tensor,
                  int num_thread)
{
    const int channel_num = input_tensor->dims[1];
    const int plane = input_tensor->dims[2] * input_tensor->dims[3];
    float* src = (float*)input_tensor->data;
    float* dst = (float*)output_tensor->data;
    float* bias_val = (float*)bias_tensor->data;

#pragma omp parallel for num_threads(num_thread)
    for (int ch = 0; ch < channel_num; ch++)
    {
        const float b = bias_val[ch];
        float* s = src + ch * plane;
        float* d = dst + ch * plane;
        for (int idx = 0; idx < plane; idx++)
        {
            d[idx] = s[idx] + b;
        }
    }
    return 0;
}
/* Per-node initialization hook: the reference bias op needs no state. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Per-node teardown hook: nothing was allocated in init_node. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Pre-run hook: no buffer preparation needed for the reference kernel. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Execute the bias node: fetch the data tensor (input_tensors[0]), the
 * per-channel bias tensor (input_tensors[1]) and the output tensor, then
 * dispatch to the fp32 reference kernel.
 * Returns the kernel's status (0 on success) or -1 for unsupported
 * input data types. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* bias_tensor;
    struct tensor* output_tensor;
    /* (removed unused local `layout`, previously read from
     * ir_graph->graph_layout but never used) */
    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_bias_fp32(input_tensor, output_tensor, bias_tensor, exec_graph->num_thread);
    else
        TLOG_ERR("Input data type %d not to be supported.\n", input_tensor->data_type);
    return ret;
}
/* Scheduling priority of this implementation: generic C fallback. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
return OPS_SCORE_CANDO;
}
/* Operation table for the reference bias implementation; reshape and
 * postrun are not needed by this op. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register the reference bias implementation for OP_BIAS. */
int register_bias_ref_op()
{
return register_builtin_node_ops(OP_BIAS, &hcl_node_ops);
}
/* Remove the reference bias implementation for OP_BIAS. */
int unregister_bias_ref_op()
{
return unregister_builtin_node_ops(OP_BIAS, &hcl_node_ops);
}
|
correctness-checking-partitioned-impl.c | /*
Copyright 2021 Tim Jammer
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "correctness-checking-partitioned-impl.h"
#include "assert.h"
#ifdef DO_VALGRIND_CHECKS
#include "memcheck.h"
#endif
#include "mpi.h"
#include <stdlib.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#ifdef DEBUGING_PRINTINGS
static int next_operation_number = 0;
#endif
/*
 * Initialize a partitioned send: buf is logically split into `partitions`
 * partitions of `count` datatype elements each; the message is transferred
 * as ONE persistent MPI send of count*partitions elements, started once all
 * partitions have been marked ready (see MPIX_Wait).
 * Returns the result of MPI_Send_init.
 */
int MPIX_Psend_init(void *buf, int partitions, MPI_Count count,
        MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Info info,
        MPIX_Request *request) {
    // some information is set by partition_sending_op if needed; init it with 0
    memset(request, 0, sizeof(MPIX_Request));
    // init request
    request->buf_start = buf;
    MPI_Aint lb, size;
    // BUGFIX: MPI_Type_extent is deprecated (removed in MPI-3.0); use
    // MPI_Type_get_extent, as partition_sending_op in this file already does.
    MPI_Type_get_extent(datatype, &lb, &size); //TODO with vector types this will give a lot of false positives (?)
    request->partition_length_bytes = size * count;
    request->partition_count = partitions;
    request->partitions_ready = 0;
    request->is_active = 0;
#ifdef DO_VALGRIND_CHECKS
    request->valgrind_block_handle = VALGRIND_CREATE_BLOCK(buf,
            request->partition_length_bytes * request->partition_count,
            SEND_BLOCK_STRING);
#endif
    // remember the peer so MPIX_Wait can skip MPI_PROC_NULL requests
    request->dest = dest;
#ifdef DEBUGING_PRINTINGS
    // increment and assign
    request->operation_number = next_operation_number++;
#endif
    // init MPI
    return MPI_Send_init(buf, count * partitions, datatype, dest, tag, comm,
            &request->request);
}
/*
 * Initialize a partitioned receive; mirror of MPIX_Psend_init but backed by
 * a persistent MPI_Recv_init of count*partitions elements.
 * Returns the result of MPI_Recv_init.
 */
int MPIX_Precv_init(void *buf, int partitions, MPI_Count count,
        MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Info info,
        MPIX_Request *request) {
    // some information is set by partition_sending_op if needed; init it with 0
    memset(request, 0, sizeof(MPIX_Request));
    // init request
    request->buf_start = buf;
    MPI_Aint lb, size;
    // BUGFIX: MPI_Type_extent is deprecated (removed in MPI-3.0); use
    // MPI_Type_get_extent, as partition_sending_op in this file already does.
    MPI_Type_get_extent(datatype, &lb, &size);
    request->partition_length_bytes = size * count;
    request->partition_count = partitions;
    request->partitions_ready = 0;
    request->is_active = 0;
#ifdef DO_VALGRIND_CHECKS
    request->valgrind_block_handle = VALGRIND_CREATE_BLOCK(buf,
            request->partition_length_bytes * request->partition_count,
            RECV_BLOCK_STRING);
#endif
    // BUGFIX: the peer was never recorded here (memset left it 0), so the
    // MPI_PROC_NULL workaround in MPIX_Wait could not apply to receives;
    // store it for consistency with MPIX_Psend_init.
    request->dest = dest;
#ifdef DEBUGING_PRINTINGS
    // increment and assign
    request->operation_number = next_operation_number++;
#endif
    // init MPI
    return MPI_Recv_init(buf, count * partitions, datatype, dest, tag, comm,
            &request->request);
}
/*
 * Mark a single partition of an active partitioned operation as ready.
 * Thread-safe: the ready counter is incremented with an OpenMP atomic.
 * Always returns 0.
 */
int MPIX_Pready(int partition, MPIX_Request *request) {
assert(request->is_active == 1);
#ifdef DO_VALGRIND_CHECKS
// taint partition as modification is forbidden
VALGRIND_MAKE_MEM_NOACCESS(
((char* )request->buf_start)
+ request->partition_length_bytes * partition,
request->partition_length_bytes);
// for a send operation reading is actually legal!!
// valgrind does not support this fine grained analysis :-(
// so we have to filter valgrinds errors based on the block names
#endif
// atomic: multiple threads may sign off partitions concurrently
#pragma omp atomic
++request->partitions_ready;
return 0;
}
/* Mark every partition in [partition_low, partition_high] (both bounds
 * inclusive) as ready by delegating to MPIX_Pready. Always returns 0. */
int MPIX_Pready_range(int partition_low, int partition_high,
        MPIX_Request *request) {
    int part = partition_low;
    while (part <= partition_high) {
        MPIX_Pready(part, request);
        ++part;
    }
    return 0;
}
/*
 * Activate a partitioned request. Only bookkeeping happens here; the
 * underlying MPI_Start is deliberately deferred to MPIX_Wait so MPI's own
 * buffer accesses do not disturb the memory-access analysis (see the
 * comment in MPIX_Wait). Always returns 0.
 */
int MPIX_Start(MPIX_Request *request) {
// do nothing now
assert(request->is_active == 0);
request->is_active = 1;
assert(request->partitions_ready == 0);
return 0;
}
/*
 * Complete a partitioned operation: verify all partitions were signed off,
 * un-taint the buffer for valgrind, start the deferred persistent MPI
 * request, reset the per-cycle bookkeeping, and wait for completion.
 * Requests addressed to MPI_PROC_NULL are skipped entirely (MPICH
 * workaround, see below). Returns the MPI_Wait result, or 0 if skipped.
 */
int MPIX_Wait(MPIX_Request *request, MPI_Status *status) {
//TODO: with #pragma omp atomic capture
//we can call the start part when the last thread signs off the partitions
assert(request->is_active == 1);
// if partition count ==1 then this should also function like a normal persistent operation
if (request->partition_count == 1 && request->partitions_ready != 1) {
MPIX_Pready(0, request);
}
#ifdef DEBUGING_PRINTINGS
printf("Operation %d: %d of %d Partitions are ready\n",
request->operation_number, request->partitions_ready,
request->partition_count);
#endif
// every partition must have been signed off before waiting
assert(request->partition_count == request->partitions_ready);
#ifdef DO_VALGRIND_CHECKS
// now access is legal again
VALGRIND_MAKE_MEM_DEFINED(request->buf_start,
request->partition_length_bytes * request->partition_count);
#endif
if (request->dest != MPI_PROC_NULL) {
//TODO is this a bug in MPICH implementation?
//as its segfaults if starting a request to MPI_PROC_NULL
//TODO confirm that it is standard compliant to use proc null in persistent op and file bug report
//TODO check with other implementation such as openmpi
MPI_Start(&request->request);
}
// only start communication now, so that MPI itself does not interfere with
// our memory access Analysis this way of implementing things is legal
// according to the MPI standard anyway
// reset for next start call
request->is_active = 0;
request->partitions_ready = 0;
// reset the local overlap information
if (request->local_overlap) {
memset(request->local_overlap, 0,
sizeof(int) * request->partition_count);
}
if (request->dest != MPI_PROC_NULL) {
return MPI_Wait(&request->request, status);
} else
return 0;
}
/*
 * Release a (deactivated) partitioned request: discard the valgrind block
 * handle, free the optional overlap bookkeeping arrays, and free the
 * underlying persistent MPI request. Returns the MPI_Request_free result.
 */
int MPIX_Request_free(MPIX_Request *request) {
assert(request->is_active == 0);
#ifdef DO_VALGRIND_CHECKS
VALGRIND_DISCARD(request->valgrind_block_handle);
#endif
if (request->local_overlap) {
free(request->local_overlap);
}
if (request->local_overlap_count) {
free(request->local_overlap_count);
}
return MPI_Request_free(&request->request);
}
// current iter is the last index of current loop iteration+1 (upper bound)
/*
 * After a loop chunk [min_iter, max_iter] has finished, translate its
 * linear access pattern (A*x + B, with per-request A_min/B_min and
 * A_max/B_max bounds) into the range of buffer partitions it touched and
 * atomically sign each one off; a partition becomes Pready once all
 * overlapping chunks (local_overlap_count) have signed off.
 * For single-partition requests this does nothing (handled in MPIX_Wait).
 * Always returns 1.
 */
int signoff_partitions_after_loop_iter(MPIX_Request *request,
// loop info
// access= pattern ax+b
long min_iter, long max_iter) {
// else: do nothing, mark message ready at wait call
if (request->partition_count > 1) {
//TODO is this sufficient? or do we need (ptr_arithmetic_t) ?
long min_adress = request->A_min * min_iter + request->B_min;
long max_adress = request->A_max * max_iter + request->B_max;
//minimum_partition to sign off
int min_part_num = (min_adress - (long) request->buf_start)
/ request->partition_length_bytes;
int max_part_num = (max_adress - (long) request->buf_start)
/ request->partition_length_bytes;
if ((max_adress - (long) request->buf_start)
% request->partition_length_bytes == 0) {
// Partition boundary itself belongs to the next partition
--max_part_num;
}
#ifdef DEBUGING_PRINTINGS
printf(
"Operation %d: Loop Part %ld to %ld : ready Partitions %d to %d \n",
request->operation_number, min_iter, max_iter, min_part_num,
max_part_num);
#endif
// not outside of the boundaries of this operation
min_part_num = min_part_num < 0 ? 0 : min_part_num;
max_part_num =
max_part_num > request->partition_count - 1 ?
request->partition_count - 1 : max_part_num;
// mark all involved partitions ready
for (int i = min_part_num; i <= max_part_num; ++i) {
int new_val;
// atomic add and fetch
#pragma omp atomic capture
new_val = ++request->local_overlap[i];
if (new_val == request->local_overlap_count[i]) {
// other threads have also signed off
MPIX_Pready(i, request);
}
}
}
return 1;
}
#ifdef DEBUGING_PRINTINGS
/*
 * Debug helper (compiled only under DEBUGING_PRINTINGS): print the memory
 * layout of a partitioned operation as a single sorted listing that merges
 * three address streams — chunk start addresses, chunk end addresses and
 * partition boundaries — followed by the per-partition overlap counts.
 * All label strings are heap-allocated, printed once and freed in the
 * merge loop.
 */
void debug_printing(MPI_Aint type_extned, long loop_max, long loop_min,
long chunk_size, long A_min, long B_min, long A_max, long B_max,
MPIX_Request *request) {
//DEBUG PRINTING
printf("Memory Layout for partitioned Operation:\n");
char **msg_partitions = (char**) malloc(
sizeof(char*) * (request->partition_count + 1));
ptr_arithmetic_t *partition_adress = (ptr_arithmetic_t*) malloc(
sizeof(ptr_arithmetic_t) * (request->partition_count + 1));
for (int i = 0; i < request->partition_count; ++i) {
partition_adress[i] = (ptr_arithmetic_t) request->buf_start
+ (i * (ptr_arithmetic_t) request->partition_length_bytes);
size_t needed_bytes = snprintf(NULL, 0, "Start MSG Part %i", i) + 1;
msg_partitions[i] = (char*) malloc(needed_bytes);
sprintf(msg_partitions[i], "Start MSG Part %i", i);
}
partition_adress[request->partition_count] = (long) request->buf_start
+ request->partition_count * request->partition_length_bytes;
size_t needed_bytes = snprintf(NULL, 0, "End of Message") + 1;
msg_partitions[request->partition_count] = (char*) malloc(needed_bytes);
sprintf(msg_partitions[request->partition_count], "End of Message");
int chunks = (loop_max - loop_min + 1) / chunk_size;
char **msg_chunks_begin = (char**) malloc(sizeof(char*) * chunks);
ptr_arithmetic_t *chunk_adress_begin = (ptr_arithmetic_t*) malloc(
sizeof(ptr_arithmetic_t) * chunks);
char **msg_chunks_end = (char**) malloc(sizeof(char*) * chunks);
ptr_arithmetic_t *chunk_adress_end = (ptr_arithmetic_t*) malloc(
sizeof(ptr_arithmetic_t) * chunks);
for (int i = 0; i < chunks; ++i) {
unsigned long min_chunk_iter = loop_min + (i * chunk_size);
unsigned long max_chunk_iter = loop_min + ((i + 1) * chunk_size);
// not outside loop bounds
assert(min_chunk_iter >= loop_min); // otherwise makes no sense
//min_chunk_iter = min_chunk_iter <loop_min ? loop_min : min_chunk_iter;
max_chunk_iter = max_chunk_iter > loop_max ? loop_max : max_chunk_iter;
chunk_adress_begin[i] = A_min * min_chunk_iter + B_min;
chunk_adress_end[i] = A_max * max_chunk_iter + B_max;
size_t needed_bytes = snprintf(NULL, 0, "Start Loop Chunk %i", i) + 1;
msg_chunks_begin[i] = (char*) malloc(needed_bytes);
sprintf(msg_chunks_begin[i], "Start Loop Chunk %i", i);
needed_bytes = snprintf(NULL, 0, "End Loop Chunk %i", i) + 1;
msg_chunks_end[i] = (char*) malloc(needed_bytes);
sprintf(msg_chunks_end[i], "End Loop Chunk %i", i);
}
// three-way merge of the sorted address streams
int current_chunk_begin = 0;
int current_chunk_end = 0;
int current_partition = 0;
ptr_arithmetic_t base_adress = (ptr_arithmetic_t) request->buf_start;
while (current_chunk_begin < chunks || current_chunk_end < chunks
|| current_partition <= request->partition_count) {
// ULONG_MAX *2 is enough to be larger than every ptr
// NOTE(review): ULONG_MAX * 2 wraps in unsigned long arithmetic; this
// sentinel is only "larger than every ptr" if ptr_arithmetic_t is
// wider than unsigned long -- TODO confirm the typedef.
ptr_arithmetic_t curr_chunk_add_begin =
current_chunk_begin < chunks ?
chunk_adress_begin[current_chunk_begin] : ULONG_MAX * 2;
ptr_arithmetic_t curr_chunk_add_end =
current_chunk_end < chunks ?
chunk_adress_end[current_chunk_end] : ULONG_MAX * 2;
ptr_arithmetic_t curr_P =
current_partition <= request->partition_count ?
partition_adress[current_partition] : ULONG_MAX * 2;
// lowest
if (curr_chunk_add_begin < curr_chunk_add_end
&& curr_chunk_add_begin < curr_P) {
printf("0x%.8lX: %s (%ld)\n",
(unsigned long) chunk_adress_begin[current_chunk_begin],
msg_chunks_begin[current_chunk_begin],
(long) (chunk_adress_begin[current_chunk_begin]
- base_adress));
free(msg_chunks_begin[current_chunk_begin]);
current_chunk_begin++;
} else if (curr_chunk_add_end < curr_P) {
printf("0x%.8lX: %s (%ld)\n",
(unsigned long) chunk_adress_end[current_chunk_end],
msg_chunks_end[current_chunk_end],
(long) (chunk_adress_end[current_chunk_end] - base_adress));
free(msg_chunks_end[current_chunk_end]);
current_chunk_end++;
} else {
printf("0x%.8lX: %s (%ld)\n",
(unsigned long) partition_adress[current_partition],
msg_partitions[current_partition],
(long) (partition_adress[current_partition] - base_adress));
free(msg_partitions[current_partition]);
current_partition++;
}
}
free(msg_chunks_begin);
free(msg_chunks_end);
free(msg_partitions);
free(chunk_adress_begin);
free(chunk_adress_end);
free(partition_adress);
printf("\n");
printf("Partitions overlap_count:\n");
for (int i = 0; i < request->partition_count; ++i) {
printf("Partition %i : %i overlaps\n", i,
request->local_overlap_count[i]);
}
}
#endif
#define MAXIMUM_ITERATIONS 10
/*
 * Choose a partition size in bytes that evenly divides a message of
 * `count` elements of `type_extend` bytes each, searching near the
 * requested size.
 *
 * Starting from the requested size (rounded up to whole elements), probe
 * candidates outward (+1, -1, +2, -2, ... elements) for at most
 * MAXIMUM_ITERATIONS steps. Returns the first candidate that divides
 * `count` evenly, or the full message size (= no partitioning) if the
 * request exceeds the message or no divisor was found.
 */
unsigned long find_valid_partition_size_bytes(long count, long type_extend,
        long requested_partition_size_bytes) {
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank); // only needed to govern debug printing so that only rank 0 prints
#ifdef DEBUGING_PRINTINGS
    if (rank == 0)
        printf(
                "find_valid Part size: count %ld, type %ld = %ldb,requested %ldb\n",
                count, type_extend, count * type_extend,
                requested_partition_size_bytes);
#endif
    long requested_partition_size_datamembers = requested_partition_size_bytes
            / type_extend;
    // request more if necessary
    if (requested_partition_size_bytes % type_extend != 0) {
        ++requested_partition_size_datamembers;
    }
    // robustness: never probe a non-positive candidate (avoids % by zero)
    if (requested_partition_size_datamembers <= 0) {
        requested_partition_size_datamembers = 1;
    }
    long sending_size_byte = count * type_extend;
    if (requested_partition_size_bytes > sending_size_byte) {
        return sending_size_byte;
    }
    int partition_size_canidate = requested_partition_size_datamembers;
    if (count % partition_size_canidate == 0) {
        return partition_size_canidate * type_extend;
    }
    // search outward from the requested size: +1, -1, +2, -2, ...
    int sign = 1;
    int offset = 1;
    long X_start = partition_size_canidate;
    int loop_count = 0;
    do {
        partition_size_canidate = X_start + sign * offset;
        if (sign < 0) {
            offset++;
        }
        sign *= -1;
        loop_count++;
        // BUGFIX: the condition used `... != 0 || loop_count < MAX`, which
        // kept looping (at least MAX times, potentially forever) even after
        // a divisor was found; it must stop on the first divisor and give
        // up after MAXIMUM_ITERATIONS. Also skip non-positive candidates
        // so `count %` is never evaluated with a divisor <= 0.
    } while (loop_count < MAXIMUM_ITERATIONS
            && (partition_size_canidate <= 0
                    || count % partition_size_canidate != 0));
    // BUGFIX: the success test was inverted (it returned a candidate that
    // did NOT divide count); accept the candidate only on an even division.
    if (partition_size_canidate > 0 && count % partition_size_canidate == 0) {
        return partition_size_canidate * type_extend;
    } else {
#ifdef DEBUGING_PRINTINGS
        printf(
                "Was not able to calculate a good partition in %d Iterations: will not partiton this operation\n ",
                MAXIMUM_ITERATIONS);
#endif
        return sending_size_byte;
    }
}
//%partitions = call i32 @partition_sending_op(i8* %call3, i64 4000, i32 1275069445, i32 %rem, i32 42, i32 1140850688,
//%struct.MPIX_Request* %mpix_request,
//i64 4, i64 %5, i64 %7, i64 %9,
//i64 1000, i64 0, i64 3999)
// loop size is inclusive!
/*
 * Set up a (possibly partitioned) send for a loop whose accesses to buf
 * follow the linear pattern A*x + B. From the access range of one chunk
 * ([curr_min, curr_max] now, [next_min, next_max] chunk_size iterations
 * later) it derives the per-iteration strides A_min/A_max and offsets
 * B_min/B_max, picks a partition size via find_valid_partition_size_bytes,
 * initializes the MPIX request, and precomputes for every partition how
 * many loop chunks overlap it (local_overlap_count) so that
 * signoff_partitions_after_loop_iter can mark partitions ready.
 * Falls back to a single partition when one chunk already spans the whole
 * message. Returns the number of partitions created.
 * Loop bounds loop_min/loop_max are inclusive.
 */
int partition_sending_op(void *buf, MPI_Count count, MPI_Datatype datatype,
int dest, int tag, MPI_Comm comm, MPIX_Request *request,
// loop info
// access= pattern ax+b
long curr_min, long next_min, long curr_max, long next_max,
long chunk_size, long loop_min, long loop_max) {
// calculate A and B based on the access range of one chunk
//TODO refactor to remove irrelevant calculation of A and B
long A_min, B_min, A_max, B_max;
B_min = curr_min;
B_max = curr_max;
A_min = (next_min - curr_min) / chunk_size;
A_max = (next_max - curr_max) / chunk_size;
int partitions = 1;
int rank;
MPI_Comm_rank(comm, &rank); // only needed to govern debug printing so that only rank 0 prints
//#pragma omp single
//{
assert(A_min > 0 && "Decrementing loops not supported yet");
assert(A_max > 0 && "Decrementing loops not supported yet");
MPI_Aint type_extned, type_lb;
//MPI_Type_extent(datatype, &type_extned);
MPI_Type_get_extent(datatype, &type_lb, &type_extned);
void *chunk_access_start;
unsigned long chunk_access_length;
long chunk_access_stride; // may be negative! == overlapping access
long sending_size = type_extned * count;
// bytes touched by one chunk of the loop
long access_size = (A_max * (loop_min + chunk_size) + B_max)
- ((A_min * loop_min) + B_min);
#ifdef DEBUGING_PRINTINGS
if (rank == 0) {
printf("Try to partition this sending operation\n");
printf(" loop size: %ld-%ld chunk:%ld\n", loop_min, loop_max,
chunk_size);
printf(" sending size: %ld access_size:%ld\n", sending_size,
access_size);
printf(" Ax+b: %ldx+%ld to %ldx+%ld\n", A_min, B_min, A_max, B_max);
printf(" buf_start %lu count %lld\n", (unsigned long) buf, count);
}
assert(access_size == next_max - curr_min);
#endif
if (access_size >= sending_size) {
// no partitioning useful
#ifdef DEBUGING_PRINTINGS
if (rank == 0)
printf("Did not Partition Operation\n");
#endif
assert(partitions == 1);
MPIX_Psend_init(buf, partitions, count, datatype, dest, tag, comm,
MPI_INFO_NULL, request);
request->A_max = A_max;
request->B_max = B_max;
request->A_min = A_min;
request->B_min = B_min;
//request->type_extend = type_extned;
} else {
unsigned requested_partition_size_byte = access_size;
unsigned long valid_partition_size_byte =
find_valid_partition_size_bytes(count, type_extned,
requested_partition_size_byte);
#ifdef DEBUGING_PRINTINGS
if (rank == 0)
printf("calculated Partition size: %lub\n",
valid_partition_size_byte);
#endif
unsigned valid_partition_size_datamembers = valid_partition_size_byte
/ type_extned;
// sanity: the chosen size must tile the message exactly
assert(valid_partition_size_byte % type_extned == 0);
partitions = count / valid_partition_size_datamembers;
assert(count % valid_partition_size_datamembers == 0);
assert(
(partitions * valid_partition_size_datamembers * type_extned)
== sending_size);
assert((valid_partition_size_byte * partitions) == sending_size);
assert(valid_partition_size_byte % type_extned == 0);
assert((valid_partition_size_datamembers * partitions) == count);
/*
int partition_size_datamembers = 0;
int partitions = 1;
if (partition_size_byte > type_extned) {
// larger: how many datamembers do we need per partition?
partitions = partition_size_byte / type_extned;
if (partition_size_byte % type_extned != 0) {
// we need full datamembers
partitions++;
}
} else if (partition_size_byte < type_extned) {
// smaller: each partition has 1 datamember
partitions = count;
partition_size_datamembers = 1;
} else {
// equals: each partition has 1 datamember
partitions = count;
partition_size_datamembers = 1;
}
*/
#ifdef STATISTIC_PRINTINGS
printf("Partitioned send operation into %d Partitions\n", partitions);
#endif
MPIX_Psend_init(buf, partitions, valid_partition_size_datamembers,
datatype, dest, tag, comm,
MPI_INFO_NULL, request);
request->A_max = A_max;
request->B_max = B_max;
request->A_min = A_min;
request->B_min = B_min;
//request->type_extend = type_extned;
// calculate local overlap
//TODO is there a better way than calculating it for each partition?
// one can parallelize it at least?
request->local_overlap = (int*) calloc(partitions, sizeof(int));
request->local_overlap_count = (int*) malloc(partitions * sizeof(int));
for (int i = 0; i < partitions; ++i) {
ptr_arithmetic_t partition_min = (ptr_arithmetic_t) buf
+ ((ptr_arithmetic_t) request->partition_length_bytes * i);
ptr_arithmetic_t partition_max = partition_min
+ (ptr_arithmetic_t) request->partition_length_bytes - 1;
// boundary is exclusive
// mem access = Ax+b ==> x = (mem-b)/A
ptr_arithmetic_t min_loop_iter = (partition_min - B_min) / A_min;
ptr_arithmetic_t max_loop_iter = (partition_max - B_max) / A_max;
//TODO what if (mem-b)%A != 0 ?? is rounding down ok?
// not outside loop bounds
min_loop_iter = min_loop_iter < loop_min ? loop_min : min_loop_iter;
max_loop_iter = max_loop_iter > loop_max ? loop_max : max_loop_iter;
//if (rank==0) printf("Partition %i from loop iter %li to %li\n",i,min_loop_iter,max_loop_iter);
ptr_arithmetic_t min_chunk = (min_loop_iter - loop_min)
/ chunk_size;
ptr_arithmetic_t max_chunk = (max_loop_iter - loop_min)
/ chunk_size;
// rounding down integer division is desired here
//if (rank==0) printf("Partition %i from chunk %li to %li\n",i,min_chunk,max_chunk);
// +1 as both numbers are inclusive
request->local_overlap_count[i] = max_chunk - min_chunk + 1;
}
#ifdef DEBUGING_PRINTINGS
if (rank == 0) {
debug_printing(type_extned, loop_max, loop_min, chunk_size, A_min,
B_min, A_max, B_max, request);
}
#endif
}
//TODO which values can be inferred for the r/w mem access on the buffer regarding the loop index
//}// end of pragma omp single
return partitions;
}
|
queue.h | // -*- C++ -*-
// Copyright (C) 2007-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/queue.h
* @brief Lock-free double-ended queue.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Johannes Singler.
#ifndef _GLIBCXX_PARALLEL_QUEUE_H
#define _GLIBCXX_PARALLEL_QUEUE_H 1
#include <parallel/types.h>
#include <parallel/base.h>
#include <parallel/compatibility.h>
/** @brief Decide whether to declare certain variable volatile in this file. */
#define _GLIBCXX_VOLATILE volatile
namespace __gnu_parallel
{
/**@brief Double-ended queue of bounded size, allowing lock-free
* atomic access. push_front() and pop_front() must not be called
* concurrently to each other, while pop_back() can be called
* concurrently at all times.
* @c empty(), @c size(), and @c top() are intentionally not provided.
* Calling them would not make sense in a concurrent setting.
* @param _Tp Contained element type. */
template<typename _Tp>
  class _RestrictedBoundedConcurrentQueue
  {
  private:
    /** @brief Array of elements, seen as cyclic buffer. */
    _Tp* _M_base;

    /** @brief Maximal number of elements contained at the same time. */
    _SequenceIndex _M_max_size;

    /** @brief Cyclic __begin and __end pointers contained in one
     *  atomically changeable value, so that both can be updated by a
     *  single compare-and-swap. */
    _GLIBCXX_VOLATILE _CASable _M_borders;

  public:
    /** @brief Constructor. Not to be called concurrently, of course.
     *  @param __max_size Maximal number of elements to be contained. */
    _RestrictedBoundedConcurrentQueue(_SequenceIndex __max_size)
    {
      _M_max_size = __max_size;
      _M_base = new _Tp[__max_size];
      _M_borders = __encode2(0, 0);
#pragma omp flush
    }

    /** @brief Destructor. Not to be called concurrently, of course. */
    ~_RestrictedBoundedConcurrentQueue()
    { delete[] _M_base; }

    /** @brief Pushes one element into the queue at the front end.
     *  Must not be called concurrently with pop_front(). */
    void
    push_front(const _Tp& __t)
    {
      _CASable __former_borders = _M_borders;
      int __former_front, __former_back;
      __decode2(__former_borders, __former_front, __former_back);
      // Store the element before publishing the new front index below.
      *(_M_base + __former_front % _M_max_size) = __t;
#if _GLIBCXX_ASSERTIONS
      // Otherwise: front - back > _M_max_size eventually.
      _GLIBCXX_PARALLEL_ASSERT(((__former_front + 1) - __former_back)
                               <= _M_max_size);
#endif
      // Atomic increment of the front index makes the element visible.
      __fetch_and_add(&_M_borders, __encode2(1, 0));
    }

    /** @brief Pops one element from the queue at the front end.
     *  Must not be called concurrently with push_front(). */
    bool
    pop_front(_Tp& __t)
    {
      int __former_front, __former_back;
#pragma omp flush
      __decode2(_M_borders, __former_front, __former_back);
      while (__former_front > __former_back)
        {
          // Chance: try to claim the front element via CAS; retry with
          // freshly read borders if another thread raced us.
          _CASable __former_borders = __encode2(__former_front,
                                                __former_back);
          _CASable __new_borders = __encode2(__former_front - 1,
                                             __former_back);
          if (__compare_and_swap(&_M_borders, __former_borders,
                                 __new_borders))
            {
              __t = *(_M_base + (__former_front - 1) % _M_max_size);
              return true;
            }
#pragma omp flush
          __decode2(_M_borders, __former_front, __former_back);
        }
      return false;
    }

    /** @brief Pops one element from the queue at the back end.
     *  Can be called concurrently (see class description); the CAS loop
     *  arbitrates between competing callers. */
    bool
    pop_back(_Tp& __t) //queue behavior
    {
      int __former_front, __former_back;
#pragma omp flush
      __decode2(_M_borders, __former_front, __former_back);
      while (__former_front > __former_back)
        {
          // Chance: try to claim the back element via CAS; retry with
          // freshly read borders if another thread raced us.
          _CASable __former_borders = __encode2(__former_front,
                                                __former_back);
          _CASable __new_borders = __encode2(__former_front,
                                             __former_back + 1);
          if (__compare_and_swap(&_M_borders, __former_borders,
                                 __new_borders))
            {
              __t = *(_M_base + __former_back % _M_max_size);
              return true;
            }
#pragma omp flush
          __decode2(_M_borders, __former_front, __former_back);
        }
      return false;
    }
  };
} //namespace __gnu_parallel
#undef _GLIBCXX_VOLATILE
#endif /* _GLIBCXX_PARALLEL_QUEUE_H */
|
wow_srp_fmt_plug.c | /*
* This software was written by Jim Fougeron jfoug AT cox dot net
* in 2012. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2012 Jim Fougeron
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
*
* This implements the SRP protocol, with Blizzard's (battlenet) documented
* implementation specifics.
*
* U = username in upper case
* P = password in upper case
* s = random salt value.
*
* x = SHA1(s . SHA1(U . ":" . P));
* v = 47^x % 112624315653284427036559548610503669920632123929604336254260115573677366691719
*
* v is the 'verifier' value (256 bit value).
*
* Added OMP. Added 'default' oSSL BigNum exponentiation.
* GMP exponentation (faster) is optional, and controlled with HAVE_LIBGMP in autoconfig.h
*
 * NOTE, bug fix required. The incoming binary may be 64 bytes OR LESS. It
* can also be 64 bytes (or less), and have left padded 0's. We have to adjust
* several things to handle this properly. First, valid must handle it. Then
* binary and salt both must handle this. Also, crypt must handle this. NOTE,
* the string 'could' be an odd length. If so, then only 1 byte of hex is put
* into the first binary byte. all of these problems were found once I got
 * jtrts.pl working with wowsrp. There now are 2 input files for wowsrp. One
 * does not left-pad the numbers with 0's (so if a number only has 61 hex
 * bytes of precision, then only 61 bytes will be in the string). The other
* file left pads the numbers with 0's to an even 64 bytes long, so all are
 * 64 bytes. The format MUST handle both, since at this moment, we are not
* exactly sure which type will be seen in the wild. NOTE, the byte swapped
* method (GMP) within is no longer valid, and was removed.
* NOTE, we need to add split() to canonize this format (remove LPad 0's)
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_blizzard;
#elif FMT_REGISTERS_H
john_register_one(&fmt_blizzard);
#else
#if AC_BUILT
/* we need to know if HAVE_LIBGMP is defined */
#include "autoconfig.h"
#endif
#include <string.h>
#include "sha.h"
#include "sha2.h"
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "unicode.h" /* For encoding-aware uppercasing */
#ifdef HAVE_LIBGMP
#if HAVE_GMP_GMP_H
#include <gmp/gmp.h>
#else
#include <gmp.h>
#endif
#define EXP_STR " GMP-exp"
#else
#include <openssl/bn.h>
#define EXP_STR " oSSL-exp"
#endif
#include "johnswap.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "WoWSRP"
#define FORMAT_NAME "Battlenet"
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR EXP_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
/* Tag prefixing every hash handled by this format. */
#define WOWSIG "$WoWSRP$"
#define WOWSIGLEN 8
// min plaintext len is 8 PW's are only alpha-num uppercase
#define PLAINTEXT_LENGTH 16
/* Max hex digits in the verifier value (256-bit number). */
#define CIPHERTEXT_LENGTH 64
/* Only the first 4 bytes are used for fast compares/hash tables. */
#define BINARY_SIZE 4
#define BINARY_ALIGN 4
/* Full 256-bit verifier, used for exact verification. */
#define FULL_BINARY_SIZE 32
#define SALT_SIZE (64+3)
#define SALT_ALIGN 1
#define USERNAMELEN 32
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 4
// salt is in hex (salt and salt2)
static struct fmt_tests tests[] = {
	{WOWSIG"6D00CD214C8473C7F4E9DC77AE8FC6B3944298C48C7454E6BB8296952DCFE78D$73616C74", "PASSWORD", {"SOLAR"}},
	{WOWSIG"A35DCC134159A34F1D411DA7F38AB064B617D5DBDD9258FE2F23D5AB1CF3F685$73616C7432", "PASSWORD2", {"DIZ"}},
	{WOWSIG"A35DCC134159A34F1D411DA7F38AB064B617D5DBDD9258FE2F23D5AB1CF3F685$73616C7432*DIZ", "PASSWORD2"},
	// this one has a leading 0
	{"$WoWSRP$01C7F618E4589F3229D764580FDBF0D579D7CB1C071F11C856BDDA9E41946530$36354172646F744A366A7A58386D4D6E*JOHN", "PASSWORD"},
	// same hash, but without 0 (only 63 byte hash).
	{"$WoWSRP$1C7F618E4589F3229D764580FDBF0D579D7CB1C071F11C856BDDA9E41946530$36354172646F744A366A7A58386D4D6E*JOHN", "PASSWORD"},
	{NULL}
};
/* Per-candidate bignum state for the modular exponentiation. */
#ifdef HAVE_LIBGMP
typedef struct t_SRP_CTX {
	mpz_t z_mod, z_base, z_exp, z_rop;
} SRP_CTX;
#else
typedef struct t_SRP_CTX {
	BIGNUM *z_mod, *z_base, *z_exp, *z_rop;
	BN_CTX *BN_ctx;
}SRP_CTX;
#endif
static SRP_CTX *pSRP_CTX;
/* Current salt bytes (NUL-terminated) and upper-cased user name. */
static unsigned char saved_salt[SALT_SIZE];
static unsigned char user_id[USERNAMELEN];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[8];
static int max_keys_per_crypt;
/*
 * Allocate per-candidate buffers and pre-initialize the bignum contexts.
 * Called once by the framework before any crypt_all().
 */
static void init(struct fmt_main *self)
{
	int i;
#if defined (_OPENMP)
	/* Scale key counts by thread count; note min is scaled by the raw
	   thread count while max additionally gets the OMP_SCALE factor. */
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
	pSRP_CTX = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*pSRP_CTX));
	max_keys_per_crypt = self->params.max_keys_per_crypt;
	for (i = 0; i < max_keys_per_crypt; ++i) {
#ifdef HAVE_LIBGMP
		/* Blizzard's published SRP modulus and generator (base 47). */
		mpz_init_set_str(pSRP_CTX[i].z_mod, "112624315653284427036559548610503669920632123929604336254260115573677366691719", 10);
		mpz_init_set_str(pSRP_CTX[i].z_base, "47", 10);
		mpz_init_set_str(pSRP_CTX[i].z_exp, "1", 10);
		mpz_init(pSRP_CTX[i].z_rop);
		// Now, properly initialized mpz_exp, so it is 'large enough' to hold any SHA1 value
		// we need to put into it. Then we simply need to copy in the data, and possibly set
		// the limb count size.
		mpz_mul_2exp(pSRP_CTX[i].z_exp, pSRP_CTX[i].z_exp, 159);
#else
		/* OpenSSL path: same modulus/base, via BIGNUMs. */
		pSRP_CTX[i].z_mod=BN_new();
		BN_dec2bn(&pSRP_CTX[i].z_mod, "112624315653284427036559548610503669920632123929604336254260115573677366691719");
		pSRP_CTX[i].z_base=BN_new();
		BN_set_word(pSRP_CTX[i].z_base, 47);
		pSRP_CTX[i].z_exp=BN_new();
		pSRP_CTX[i].z_rop=BN_new();
		pSRP_CTX[i].BN_ctx = BN_CTX_new();
#endif
	}
}
static void done(void)
{
#ifdef HAVE_LIBGMP
int i;
for (i = 0; i < max_keys_per_crypt; ++i) {
mpz_clear(pSRP_CTX[i].z_mod);
mpz_clear(pSRP_CTX[i].z_base);
mpz_clear(pSRP_CTX[i].z_exp);
mpz_clear(pSRP_CTX[i].z_rop);
}
#endif
MEM_FREE(pSRP_CTX);
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
/*
 * Validate a candidate ciphertext line: WOWSIG tag, then up to 64 hex
 * digits, '$', an even-length hex salt, '*', and a user name of at most
 * USERNAMELEN characters. Returns 1 if the line is well-formed.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *hash_start, *cur, *star;

	if (strncmp(ciphertext, WOWSIG, WOWSIGLEN) != 0)
		return 0;
	/* Scan the hex hash portion. */
	hash_start = &ciphertext[WOWSIGLEN];
	cur = hash_start;
	while (atoi16[ARCH_INDEX(*cur)] != 0x7F)
		++cur;
	if (cur - hash_start > CIPHERTEXT_LENGTH)
		return 0;
	if (*cur != '$')
		return 0;
	++cur;
	/* The salt runs from here to the '*' separator. */
	star = strchr(cur, '*');
	if (star == NULL)
		return 0;
	if ((star - cur) & 1)
		return 0;
	if (star - cur >= 2 * SALT_SIZE)
		return 0;
	/* Every salt character must be a hex digit. */
	while (atoi16[ARCH_INDEX(*cur)] != 0x7F)
		++cur;
	if (cur != star)
		return 0;
	if (strlen(star + 1) > USERNAMELEN)
		return 0;
	return 1;
}
/*
* Copy as much as ct2_size to ct2 to avoid buffer overflow
*/
/*
 * Copy ct into ct2, removing the leading '0' hex digits that follow the
 * WOWSIG tag. At most ct2_size - 1 characters are copied (plus the NUL),
 * so ct2 cannot overflow.
 *
 * Fix: the signature-copy loop now also stops at the input's NUL
 * terminator, so an unexpectedly short input can no longer be read past
 * its end. Behavior is unchanged for well-formed inputs (callers only
 * pass strings that begin with the 8-char WOWSIG tag).
 */
static void StripZeros(const char *ct, char *ct2, const int ct2_size) {
	int i;
	/* Copy the signature prefix, bounded by both buffers. */
	for (i = 0; i < WOWSIGLEN && *ct && i < (ct2_size - 1); ++i)
		*ct2++ = *ct++;
	/* Skip the left-padding zeros of the hash value. */
	while (*ct == '0')
		++ct;
	/* Copy the remainder, still bounded by the output size. */
	while (*ct && i < (ct2_size - 1)) {
		*ct2++ = *ct++;
		i++;
	}
	*ct2 = 0;
}
/*
 * Canonize an input line before valid(): append the (upper-cased) login
 * field as "*USER" when the hash line lacks one, and strip left-padded
 * zeros from the hash value. Returns either the original field or a
 * pointer to a static canonized buffer.
 */
static char *prepare(char *split_fields[10], struct fmt_main *pFmt) {
	// if user name not there, then add it
	static char ct[128+32+1];
	char *cp;

	if (!split_fields[1][0] || strncmp(split_fields[1], WOWSIG, WOWSIGLEN))
		return split_fields[1];
	cp = strchr(split_fields[1], '*');
	if (cp) {
		/* User name already embedded; only de-pad the hash if needed. */
		if (split_fields[1][WOWSIGLEN] == '0') {
			StripZeros(split_fields[1], ct, sizeof(ct));
			return ct;
		}
		return split_fields[1];
	}
	/* No '*USER' suffix: build one from the login field (field 0). */
	strnzcpy(ct, split_fields[1], 128);
	cp = &ct[strlen(ct)];
	*cp++ = '*';
	strnzcpy(cp, split_fields[0], USERNAMELEN);
	// upcase user name
	enc_strupper(cp);
	// Ok, if there are leading 0's for that binary resultant value, then remove them.
	if (ct[WOWSIGLEN] == '0') {
		char ct2[128+32+1];
		StripZeros(ct, ct2, sizeof(ct2));
		strcpy(ct, ct2);
	}
	return ct;
}
/*
 * Canonical form of a ciphertext: upper-case the hex hash (leaving the
 * user name after '*' untouched) and strip left-padded zeros.
 * Returns a pointer to a static buffer.
 */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt) {
	static char out[128+32+1];
	char *star;

	strnzcpy(out, ciphertext, 128+32+1);
	/* Temporarily cut at '*' so only the hash/salt part is upper-cased. */
	star = strchr(out, '*');
	if (star)
		*star = 0;
	strupr(&out[WOWSIGLEN]);
	if (star)
		*star = '*';
	/* Remove leading zeros so both padded and unpadded inputs match. */
	if (out[WOWSIGLEN] == '0') {
		char tmp[128+32+1];
		StripZeros(out, tmp, sizeof(tmp));
		strcpy(out, tmp);
	}
	return out;
}
/*
 * Decode the hex hash (the verifier value v) into a zero-padded
 * FULL_BINARY_SIZE byte buffer. Handles hashes shorter than 64 hex
 * digits, including odd lengths (first byte then holds one nibble),
 * matching how crypt_all() serializes its bignum result.
 * Returns a pointer to a static buffer.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char b[FULL_BINARY_SIZE];
		ARCH_WORD_32 dummy[1];
	} out;
	char *p, *q;
	int i;

	p = &ciphertext[WOWSIGLEN];
	q = strchr(p, '$');
	memset(out.b, 0, sizeof(out.b));
	/* Skip any left-padded zeros (canonical form has none anyway). */
	while (*p == '0')
		++p;
	if ((q-p)&1) {
		/* Odd digit count: the first byte gets a single nibble. */
		out.b[0] = atoi16[ARCH_INDEX(*p)];
		++p;
	} else {
		out.b[0] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	/* Remaining digits are an even count; consume them pairwise. The
	   post-write p >= q check is safe because pairs align with q. */
	for (i = 1; i < FULL_BINARY_SIZE; i++) {
		out.b[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
		if (p >= q)
			break;
	}
	//dump_stuff_msg("binary", out.b, 32);
	return out.b;
}
/*
 * Decode the salt portion into a static buffer with layout:
 *   b[0]            = salt byte count
 *   b[1..len]       = raw salt bytes (hex-decoded)
 *   b[len+1..]      = NUL-terminated user name (copied verbatim)
 * Handles odd-length hex salts (first byte holds a single nibble).
 *
 * NOTE(review): valid() allows up to 2*SALT_SIZE-2 hex digits, i.e. up
 * to 66 salt bytes; with the length byte and a user name appended this
 * looks like it could exceed the SALT_SIZE buffer — TODO confirm the
 * real-world salt lengths against valid()'s bound.
 */
static void *get_salt(char *ciphertext)
{
	static union {
		unsigned char b[SALT_SIZE];
		ARCH_WORD_32 dummy;
	} out;
	char *p;
	int length=0;

	memset(out.b, 0, SALT_SIZE);
	p = strchr(&ciphertext[WOWSIGLEN], '$') + 1;
	// We need to know if this is odd length or not.
	while (atoi16[ARCH_INDEX(*p++)] != 0x7f)
		length++;
	p = strchr(&ciphertext[WOWSIGLEN], '$') + 1;
	// handle odd length hex (yes there can be odd length in these SRP files).
	if ((length&1)&&atoi16[ARCH_INDEX(*p)] != 0x7f) {
		length=0;
		/* ++length first: data starts at b[1], b[0] is the count. */
		out.b[++length] = atoi16[ARCH_INDEX(*p)];
		++p;
	} else
		length = 0;
	/* Decode the (now even) remainder pairwise into b[1..]. */
	while (atoi16[ARCH_INDEX(*p)] != 0x7f && atoi16[ARCH_INDEX(p[1])] != 0x7f) {
		out.b[++length] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	out.b[0] = length;
	/* Append the user name (text after '*') including its NUL. */
	if (*p) {
		++p;
		memcpy(out.b + length+1, p, strlen(p)+1);
	}
	return out.b;
}
/* Hash-table accessors: all mask the first 32-bit word of the computed
   verifier at increasing bucket sizes (PH_MASK_0..PH_MASK_6). */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/*
 * Bucket a salt record into the salt hash table. Folds the bytes of the
 * record (up to its first zero byte) into SALT_HASH_LOG bits.
 */
static int salt_hash(void *salt)
{
	unsigned int h = 0;
	char *cp = (char *)salt;

	while (*cp) {
		/* Shift-and-add mix, folding overflow back in as we go. */
		h = (h << 1) + (unsigned char)*cp++;
		if (h >> SALT_HASH_LOG) {
			h ^= h >> SALT_HASH_LOG;
			h &= (SALT_HASH_SIZE - 1);
		}
	}
	/* Final fold into the table range. */
	h ^= h >> SALT_HASH_LOG;
	h &= (SALT_HASH_SIZE - 1);
	return h;
}
/*
 * Install the active salt. The record layout is the one built by
 * get_salt(): [len][salt bytes][NUL-terminated user name].
 */
static void set_salt(void *salt)
{
	unsigned char *cp = (unsigned char*)salt;
	/* cp[0] is the salt byte count; the bytes follow at cp[1]. */
	memcpy(saved_salt, &cp[1], *cp);
	saved_salt[*cp] = 0;
	/* The user name sits right after the salt bytes. */
	strcpy((char*)user_id, (char*)&cp[*cp+1]);
}
/* Store a candidate password, upper-cased as the protocol requires. */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1);
	enc_strupper(saved_key[index]);
}

/* Return the stored (already upper-cased) candidate for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
// x = SHA1(s, H(U, ":", P));
// v = 47^x % 112624315653284427036559548610503669920632123929604336254260115573677366691719
/*
 * Compute v = 47^x mod N for each candidate, where
 *   x = SHA1(salt . SHA1(USER . ":" . PASS))
 * and store the big-endian result in crypt_out[j] (zero-padded, and with
 * leading zero bytes stripped the same way get_binary() strips them).
 * Uses GMP when available, otherwise OpenSSL BN. Thread-parallel over
 * candidates; each thread only touches its own pSRP_CTX[j]/crypt_out[j].
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (j = 0; j < count; ++j) {
		SHA_CTX ctx;
		unsigned char Tmp[20];
		memset(crypt_out[j], 0, sizeof(crypt_out[j]));
		/* Inner hash: SHA1(USER ":" PASS). */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, user_id, strlen((char*)user_id));
		SHA1_Update(&ctx, ":", 1);
		SHA1_Update(&ctx, saved_key[j], strlen(saved_key[j]));
		SHA1_Final(Tmp, &ctx);
		/* Outer hash: x = SHA1(salt . inner). */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, saved_salt, strlen((char*)saved_salt));
		SHA1_Update(&ctx, Tmp, 20);
		SHA1_Final(Tmp, &ctx);
		// Ok, now Tmp is v
		//if (!strcmp(saved_key[j], "ENTERNOW__1") && !strcmp((char*)user_id, "DIP")) {
		//	printf ("salt=%s user=%s pass=%s, ", (char*)saved_salt, (char*)user_id, saved_key[j]);
		//	dump_stuff_msg("sha$h ", Tmp, 20);
		//}
#ifdef HAVE_LIBGMP
		{
			unsigned char HashStr[80], *p;
			int i, todo;
			/* GMP path goes through a hex string to load the exponent. */
			p = HashStr;
			for (i = 0; i < 20; ++i) {
				*p++ = itoa16[Tmp[i]>>4];
				*p++ = itoa16[Tmp[i]&0xF];
			}
			*p = 0;
			mpz_set_str(pSRP_CTX[j].z_exp, (char*)HashStr, 16);
			mpz_powm (pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod );
			mpz_get_str ((char*)HashStr, 16, pSRP_CTX[j].z_rop);
			/* Re-pack the hex result into bytes; an odd digit count means
			   the first byte holds a single nibble (mirrors get_binary). */
			p = HashStr;
			todo = strlen((char*)p);
			if (todo&1) {
				((unsigned char*)(crypt_out[j]))[0] = atoi16[ARCH_INDEX(*p)];
				++p;
				--todo;
			} else {
				((unsigned char*)(crypt_out[j]))[0] =
					(atoi16[ARCH_INDEX(*p)] << 4) |
					atoi16[ARCH_INDEX(p[1])];
				p += 2;
				todo -= 2;
			}
			todo >>= 1;
			for (i = 1; i <= todo; i++) {
				((unsigned char*)(crypt_out[j]))[i] =
					(atoi16[ARCH_INDEX(*p)] << 4) |
					atoi16[ARCH_INDEX(p[1])];
				p += 2;
			}
			//if (!strcmp(saved_key[j], "ENTERNOW__1") && !strcmp((char*)user_id, "DIP")) {
			//	dump_stuff_msg("crypt ", crypt_out[j], 32);
			//}
		}
#else
		// using oSSL's BN to do expmod.
		pSRP_CTX[j].z_exp = BN_bin2bn(Tmp,20,pSRP_CTX[j].z_exp);
		BN_mod_exp(pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod, pSRP_CTX[j].BN_ctx);
		/* BN_bn2bin writes the minimal big-endian form, i.e. leading
		   zero bytes are dropped, matching get_binary()'s stripping. */
		BN_bn2bin(pSRP_CTX[j].z_rop, (unsigned char*)(crypt_out[j]));
		//if (!strcmp(saved_key[j], "ENTERNOW__1") && !strcmp((char*)user_id, "DIP")) {
		//	dump_stuff_msg("crypt ", crypt_out[j], 32);
		//}
#endif
	}
	return count;
}
/*
 * Fast scan: does any computed result match the candidate binary in its
 * first 32-bit word?
 */
static int cmp_all(void *binary, int count)
{
	int idx;

	for (idx = 0; idx < count; ++idx)
		if (*((ARCH_WORD_32*)binary) == *((ARCH_WORD_32*)(crypt_out[idx])))
			return 1;
	return 0;
}
/* Quick per-index check on the first 32-bit word only. */
static int cmp_one(void *binary, int index)
{
	return *((ARCH_WORD_32*)binary) == *((ARCH_WORD_32*)(crypt_out[index]));
}
static int cmp_exact(char *source, int index)
{
return !memcmp(get_binary(source), crypt_out[index], BINARY_SIZE);
}
/* Format descriptor registered with the JtR core. Field order follows
   struct fmt_main / fmt_params / fmt_methods of this tree. */
struct fmt_main fmt_blizzard = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		8, /* minimum plaintext length (see note above PLAINTEXT_LENGTH) */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL }, /* no tunable cost names */
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL }, /* no tunable cost value functions */
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL, /* salt_compare */
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
matter.c | /** @file New way of calculating angular power spectra
*
* Nils Schöneberg, 16.10.2017 (edited for DESC until 20.01.2021)
*
*/
#include "matter.h"
#include "hypergeom.h"
#include "arrays.h"
#include "fft.h"
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
/**
* Anisotropy matter power spectra \f$ C_l\f$'s for all types, windows and initial conditions.
* The mode is always scalar.
*
* This routine evaluates all the \f$C_l\f$'s at a given value of l by
* interpolating in the pre-computed table. When relevant, it also
* sums over all initial conditions.
*
* This function can be called from whatever module at whatever time, provided that
* matter_init() has been called before, and matter_free() has not
* been called yet.
*
* @param pma Input: pointer to matter structure (containing pre-computed table)
* @param l Input: multipole array
* @param l_size Input: length of multipole array
* @param cl_tot Output: array with argument cl_tot[index_cltp_grid][index_l*pma->window_size[index_cltp_grid]+index_wd_grid] (must be already allocated)
* @return the error status
*/
int matter_cl_at_l(
                   struct matters* pma,
                   double * l_array,
                   int l_size,
                   double ** cl_tot
                   ) {
  /**
   * Initialize local variables.
   * (Cleanup: removed the unused locals 'index_wd_grid' and 'offset'
   * that the original declared but never read or wrote.)
   * */
  int last_index;
  int index_cltp_grid,index_l;
  last_index = 0;
  /** Test for having calculated the cls */
  class_test(pma->has_cls && pma->l_size<=0,pma->error_message,"Matter was never calculated. Cannot obtain Cl's");
  /**
   * We set those Cl's above l_max to 0
   * */
  for (index_cltp_grid=0; index_cltp_grid<pma->cltp_grid_size; index_cltp_grid++){
    for (index_l=0;index_l<l_size;++index_l){
      /* Requested multipoles must lie inside the precomputed table. */
      class_test(l_array[index_l]>pma->l_sampling[pma->l_size-1], pma->error_message, "Requested l=%g is larger than maximum l in matter : %g \n",l_array[index_l],pma->l_sampling[pma->l_size-1]);
      /**
       * Interpolate at given value of l for all windows at once
       * */
      class_call(matter_interpolate_spline_growing_hunt(
                    pma->l_sampling,
                    pma->l_size,
                    pma->cl[index_cltp_grid],
                    pma->ddcl[index_cltp_grid],
                    pma->window_size[index_cltp_grid],
                    l_array[index_l],
                    &last_index,
                    cl_tot[index_cltp_grid]+index_l*pma->window_size[index_cltp_grid],
                    pma->error_message),
                 pma->error_message,
                 pma->error_message);
    }
  }
  return _SUCCESS_;
}
/**
* Initialize the matter construct.
*
* This initializes the matter construct, and computes the \f$C_l\f$'s
* of flat space for number count/shear.
*
* This function must be called before any internal calls, and before
* matter_free()
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input: pointer to perturbation structure
* @param ppm Input: pointer to primordial structure
* @param pnl Input: pointer to nonlinear structure
* @param pma Input/Output: pointer to matters structure
* @return the error status
*/
int matter_init(
  struct matters * pma
){
  /** Summary: */
  /** - Check for computational flags */
  if (pma->has_cls == _FALSE_){
    /* In this case, the user requested deactivation of this module, skip everything */
    return _SUCCESS_;
  }
  /* Otherwise start computation */
  else if (pma->matter_verbose > 0){
    fprintf(stdout,"Computing matter spectra\n");
  }
  if(pma->matter_verbose > MATTER_VERBOSITY_PARAMETERS){
    printf(" -> Verbosity set to %i \n",pma->matter_verbose);
  }
#ifdef _OPENMP
  double point0_time = omp_get_wtime();
#endif
  /**
   * - Copy parameters from the other structs
   * */
  /* (These should be assigned BEFORE any call to matter_obtain_indices) */
  pma->tau_size_max = 100;
  /* Round tau_size to the nearest even number, capped at tau_size_max
     (an even tau_size is required; see the class_test below). */
  pma->tau_size = MIN(2*(int)(0.5*pma->tau_size+0.25),pma->tau_size_max);
  pma->angular_rescaling = 1.;
  /**
   * - Set "fixed" flags
   * */
  pma->uses_density_splitting = _FALSE_;
  pma->uses_all_l_sampling = _FALSE_;
  pma->uses_lensing_reduction = _TRUE_;
  pma->uses_rsd_combination = _TRUE_;
  pma->uses_limber_approximation = _FALSE_;
  pma->uses_relative_factors = _FALSE_;
  pma->uses_bessel_storeall = _FALSE_;
  pma->uses_integration = matter_integrate_tw_t;
  pma->uses_intxi_symmetrized = _TRUE_;
  pma->uses_intxi_asymptotic = _FALSE_;
  pma->uses_intxi_logarithmic = _TRUE_;
  pma->uses_intxi_interpolation = _TRUE_;
  /**
   * - Obtain indices required for later evaluation
   * */
  class_call(matter_obtain_indices(pma),
             pma->error_message,
             pma->error_message);
  class_call(matter_obtain_window_indices(pma),
             pma->error_message,
             pma->error_message);
  class_call(matter_obtain_bi_indices(pma),
             pma->error_message,
             pma->error_message);
  //Tw size: 30 for lens and dens
  //IntTw size: 75 for lens and dens
  //t spline size: 50 for lens, 20 for dens
  pma->bessel_recursion_t_size = 200;
  pma->size_fft_result = pma->size_fft_cutoff;
  pma->bi_maximal_t_offset = 1.e-7;
  pma->tau_grid_size = pma->tau_size*pma->tau_size;
  //Offset for keeping numerical instabilities for
  //log(x) small for x->0 (e.g. exp(log(x1)-log(x2) ) = 0 for x1 approx = 0, not NaN
  //Also used for setting exp(log(x))!=x to a value that does not over/underflow the bounds
  pma->small_log_offset = 1e-10;
  pma->has_nc_rsd = _FALSE_;
  /**
   * - Test whether the defined combinations would give a valid calculation
   * */
  pma->selection = gaussian;
  class_test(pma->selection == dirac && pma->has_nc_rsd,
             pma->error_message,
             "Including redshift space distortions for dirac functions not yet implemented.");
  class_test(pma->selection == tophat && pma->has_nc_rsd,
             pma->error_message,
             "Including redshift space distortions for tophat functions not yet implemented.");
  class_test(pma->size_fft_cutoff>pma->size_fft_result,
             pma->error_message,
             "the coefficient cutoff size (%i) has to be smaller or equal to the result size (%i)",
             pma->size_fft_cutoff,pma->size_fft_result);
  class_test(pma->tau_size%2!=0,
             pma->error_message,
             "The tau_size parameter currently has to be a multiple of 2");
  /* Done testing parameter combinations */
#ifdef _OPENMP
  double point1_time = omp_get_wtime();
#endif
#ifdef _OPENMP
  double point2_time = omp_get_wtime();
#endif
#ifdef _OPENMP
  double point3_time = omp_get_wtime();
#endif
  /**
   * - Now all sources are obtained, and we can proceed
   *   with calculating the FFT in logarithmic k space
   * */
  double* fft_coeff_real;
  double* fft_coeff_imag;
  class_call(matter_obtain_coeff_sampling(pma),
             pma->error_message,
             pma->error_message);
  class_call(matter_FFTlog_perturbation_sources_parallel(pma,&fft_coeff_real,&fft_coeff_imag),
             pma->error_message,
             pma->error_message);
  if(!pma->uses_separability){
    /*
     * This function !replaces! the fft coefficients with their (nearly) constant counterparts
     * The originals can be obtained by re-multiplying with the growth factors
     * */
    class_call(matter_obtain_nonseparability(
                  pma,
                  &fft_coeff_real,
                  &fft_coeff_imag),
               pma->error_message,
               pma->error_message);
  }
  /* The matter struct takes ownership of the coefficient arrays;
     they are released in matter_free(). */
  pma->fft_real = fft_coeff_real;
  pma->fft_imag = fft_coeff_imag;
#ifdef _OPENMP
  double point4_time = omp_get_wtime();
#endif
  class_call(matter_obtain_l_sampling(pma),
             pma->error_message,
             pma->error_message);
  /**
   * - There are two big ways of obtaining the bessel integrals
   *
   * 1) Use the recursion relation of the bessel integrals
   *    This proves to be really fast, and surprisingly even more accurate
   *
   * 2) Using the direct representations through taylor series
   *    This older method proves to become unreliable due to
   *    floating point arithmetics, especially around
   *    high imaginary parts in nu, large l, and t of around 0.9-0.99
   *
   * We first obtain the integrals for a pre-made grid of t values,
   * after which we spline them for exactly those t values that we require evaluation at
   *
   * This way the initial t sampling is independent of l and nu,
   * while the final one can and does depend on the precise nature
   * of the window functions etc.
   * */
  short is_correct_file = pma->uses_bessel_store;
  class_call(matter_read_bessel_file_correct(pma,&is_correct_file),
             pma->error_message,
             pma->error_message);
  if(!pma->uses_limber_approximation && !is_correct_file){
    /**
     * - Obtain the bessel integrals
     * */
    class_call(matter_obtain_bessel_recursion_parallel(pma),
               pma->error_message,
               pma->error_message);
    /**
     * - Spline bessel integrals
     * */
    class_call(matter_spline_bessel_integrals_recursion(pma),
               pma->error_message,
               pma->error_message);
    /* Optionally cache the integrals on disk for later runs. */
    if(pma->uses_bessel_store){
      class_call(matter_write_bessel_integrals(pma),pma->error_message,pma->error_message);
    }
  }
  if(is_correct_file){
    class_call(matter_read_bessel_integrals(pma),pma->error_message,pma->error_message);
  }
  //Ifend obtain bessel integrals
  /* Done getting Bessel integrals and l sampling */
#ifdef _OPENMP
  double point5_time = omp_get_wtime();
#endif
#ifdef _OPENMP
  double point6_time = omp_get_wtime();
#endif
  /* Get t sampling */
  class_call(matter_obtain_t_sampling(pma),
             pma->error_message,
             pma->error_message);
#ifdef _OPENMP
  double point7_time = omp_get_wtime();
#endif
  /**
   * - Now we have truly assembled all ingredients
   *   to integrate the final \f$C_\ell\f$'s
   *   a) We have the window functions (including growth factors)
   *   b) We have the power law exponents
   *   c) We have the FFT coefficients
   *   d) We have the bessel integrals
   *   e) We have all relevant sampling grids
   *
   * Thus we are finally able to obtain the \f$C_\ell\f$'s
   * */
  class_call(matter_integrate_cl(pma,
                                 fft_coeff_real,
                                 fft_coeff_imag),
             pma->error_message,
             pma->error_message);
  /* Done integrating Cl's */
#ifdef _OPENMP
  double point8_time = omp_get_wtime();
#endif
  /**
   * - Finally we spline the \f$C_\ell\f$'s to interpolate
   *   for all \f$\ell\f$'s
   * */
  class_call(matter_spline_cls(pma),
             pma->error_message,
             pma->error_message);
  /* Done splining */
#ifdef _OPENMP
  double point9_time = omp_get_wtime();
#endif
  /**
   * - If desired, we give a summary of the program's running configuration
   *   and/or the timings of each part of the program
   * */
  if(pma->matter_verbose > MATTER_VERBOSITY_PARAMETERS){
    printf("\n\n\n PARAMETER SUMMARY \n\n ");
    printf("Calculationary parameters \n");
    printf(" -> tilt == %.10e \n",pma->bias);
    printf(" -> nu_imag max == %.10e \n",pma->nu_imag[pma->size_fft_result-1]);
    printf(" -> nu_imag step == %.10e \n",pma->nu_imag[1]);
    printf(" -> k_max == %.10e \n",pma->k_sampling[pma->size_fft_input-1]);
    printf(" -> k_min == %.10e \n",pma->k_sampling[0]);
    printf(" -> delta log(k) == %.10e \n \n\n",pma->deltalogk);
    printf(" -> tau0 == %.10e \n",pma->tau0);
    printf("Parameter counts \n");
    printf(" -> Number of types %i \n",pma->stp_size);
    printf(" -> Number of radials %i \n",pma->radtp_size_total);
    printf(" -> Number of bessel integrals %i \n",pma->bitp_size);
    printf(" -> Number of tilts %i \n",pma->tilt_size);
    printf("Parameter options as follows : \n");
    printf(" -> Parameter '%s' has value %s \n","has_cls",(pma->has_cls?"TRUE":"FALSE"));
    printf(" -> Parameter '%s' has value %s \n","uses integration",(pma->uses_integration==matter_integrate_tw_t?"tw_t":"tw_logt"));
    printf(" -> Parameter '%s' has value %s \n","uses seperability",(pma->uses_separability?"TRUE":"FALSE"));
    printf(" -> Parameter '%s' has value %s \n","allow extrapolation",(pma->allow_extrapolation?"TRUE":"FALSE"));
    printf(" -> Parameter '%s' has value %s \n","uses density spltting",(pma->uses_density_splitting?"TRUE":"FALSE"));
    printf(" -> Parameter '%s' has value %s \n","uses intxi_interpolation",(pma->uses_intxi_interpolation?"TRUE":"FALSE"));
    printf(" -> Parameter '%s' has value %s \n","uses intxi_logarithmic",(pma->uses_intxi_logarithmic?"TRUE":"FALSE"));
    printf(" -> Parameter '%s' has value %s \n","uses intxi_symmetric",(pma->uses_intxi_symmetrized?"TRUE":"FALSE"));
    printf(" -> Parameter '%s' has value %s \n","uses intxi_asymptotic",(pma->uses_intxi_asymptotic?"TRUE":"FALSE"));
    //printf(" -> Parameter '%s' has value %s \n","uses analytic bessel",(pma->uses_bessel_analytic_integration?"TRUE":"FALSE"));
    printf(" -> Parameter '%s' has value %s \n","uses all ell",(pma->uses_all_l_sampling?"TRUE":"FALSE"));
    printf(" -> Parameter '%s' has value %s \n","uses RSD combination",(pma->uses_rsd_combination?"TRUE":"FALSE"));
    printf(" -> Parameter '%s' has value %s \n","uses relative factors",(pma->uses_relative_factors?"TRUE":"FALSE"));
    printf(" -> Parameter '%s' has value %s \n","uses limber approximation",(pma->uses_limber_approximation?"TRUE":"FALSE"));
    printf(" -> Parameter '%s' has value %i \n","window number",pma->num_windows);
    printf(" -> Parameter '%s' has value %i \n","nondiagonals",pma->non_diag);
    printf(" -> Parameter '%s' has value %i \n","tw size",pma->tw_size);
    printf(" -> Parameter '%s' has value %i \n","tw integrated size",pma->integrated_tw_size);
    printf(" -> Parameter '%s' has value %i \n","t size",pma->t_size);
    printf(" -> Parameter '%s' has value %i \n","max coeff",pma->size_fft_cutoff);
  }
#ifdef _OPENMP
  if(pma->matter_verbose > MATTER_VERBOSITY_TIMING){
    printf("\n\n\n TIMING SUMMARY (for %10d spectra, %5d l values) \n\n",((pma->non_diag+1)*(2*pma->num_windows-pma->non_diag))/2,pma->l_size);
    printf("Initialization %15f seconds \n",point1_time-point0_time);
    printf("k,tau Sampling + Sources %15f seconds \n",point2_time-point1_time);
    printf("Growth Factor + Sampled Sources %15f seconds \n",point3_time-point2_time);
    printf("FFTlog of Sampled Sources %15f seconds \n",point4_time-point3_time);
    printf("Bessel Integrals %15f seconds \n",point5_time-point4_time);
    printf("Calculating Window Functions %15f seconds \n",point6_time-point5_time);
    printf("Resample Growth Factor %15f seconds \n",point7_time-point6_time);
    printf("Integrating Cl's %15f seconds (%15f sec per spectrum)\n",point8_time-point7_time,(point8_time-point7_time)/(((pma->non_diag+1)*(2*pma->num_windows-pma->non_diag))/2));
    printf("Freeing resources + Spline Cl's %15f seconds \n",point9_time-point8_time);
  }
#endif
  /* Done timing/parameter outputs */
#ifdef _OPENMP
  double point10_time = omp_get_wtime();
#endif
#ifdef _OPENMP
  if(pma->matter_verbose>MATTER_VERBOSITY_TIMING){
    printf("Matter took %15f seconds \n",point10_time-point0_time);
  }
  /* Done matter module */
#endif
  return _SUCCESS_;
}
/**
* Free all memory occupied by the matter module
*
* @param pma Input: pointer to matter structure
* @return the error status
*/
int matter_free(
  struct matters * pma
) {
  int i,j;
  /* Nothing was allocated if the module was deactivated. */
  if(pma->has_cls){
    if(pma->matter_verbose>MATTER_VERBOSITY_FUNCTIONS){
      printf("Method :: Free \n");
    }
    if(pma->matter_verbose>MATTER_VERBOSITY_DELETE){
      printf("Freeing fft-related quantities \n");
    }
    //free(pma->logk_sampling);
    //free(pma->k_sampling);
    //free(pma->tau_sampling);
    if(pma->matter_verbose>MATTER_VERBOSITY_DELETE){
      printf("Freeing up window-related quantities \n");
    }
    //free(pma->tw_sampling);
    //free(pma->tw_weights);
    //free(pma->integrated_tw_sampling);
    //free(pma->integrated_tw_weights);
    //free(pma->exp_integrated_tw_sampling);
    //free(pma->tw_max);
    //free(pma->tw_min);
    free(pma->t_sampling);
    free(pma->t_weights);
    /* Only allocated when intxi interpolation is enabled in matter_init. */
    if(pma->uses_intxi_interpolation){
      free(pma->t_spline_sampling);
    }
    free(pma->fft_real);
    free(pma->fft_imag);
    if(pma->matter_verbose>MATTER_VERBOSITY_DELETE){
      printf("Freeing up bessel memory \n");
    }
    free(pma->nu_real);
    free(pma->nu_imag);
    /* Bessel integral tables exist only when Limber was not used. */
    if(!pma->uses_limber_approximation){
      for(j=0;j<pma->tilt_grid_size;++j){
        for(i=0;i<pma->l_size_recursion*pma->size_fft_cutoff;++i){
          /* The tables were laid out with stride size_fft_result,
             but only size_fft_cutoff entries per l were filled;
             recover the storage index of entry i. */
          int delta = (i/pma->size_fft_cutoff);
          int index = delta*pma->size_fft_result+(i-pma->size_fft_cutoff*delta);
          free(pma->bi_real[j][index]);
          free(pma->bi_imag[j][index]);
          free(pma->ddbi_real[j][index]);
          free(pma->ddbi_imag[j][index]);
        }
        free(pma->bi_real[j]);
        free(pma->bi_imag[j]);
        free(pma->ddbi_real[j]);
        free(pma->ddbi_imag[j]);
        free(pma->bi_size[j]);
        free(pma->bi_max[j]);
      }
      free(pma->bi_real);
      free(pma->bi_imag);
      free(pma->ddbi_real);
      free(pma->ddbi_imag);
      free(pma->bi_sampling);
      free(pma->bi_size);
      free(pma->bi_max);
    }
    if(pma->matter_verbose>MATTER_VERBOSITY_DELETE){
      printf("Freeing up general memory \n");
    }
    free(pma->l_sampling);
    /* Radial-type lists exist per (cl type, bessel integral type) pair. */
    if(pma->has_cltp_nc){
      if(pma->has_bitp_normal){
        free(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_normal]);
      }
      if(pma->has_bitp_nu_reduced){
        free(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced]);
      }
      if(pma->has_bitp_lfactor){
        free(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_lfactor]);
      }
    }
    if(pma->has_cltp_sh){
      if(pma->has_bitp_lfactor){
        free(pma->radtps_of_bitp[pma->cltp_index_sh*pma->bitp_size+pma->bitp_index_lfactor]);
      }
    }
    free(pma->radtps_of_bitp);
    free(pma->radtp_of_bitp_size);
    for(i=0;i<pma->cltp_grid_size;++i){
      free(pma->window_index_start[i]);
      free(pma->window_index_end[i]);
    }
    free(pma->window_index_start);
    free(pma->window_index_end);
    free(pma->window_size);
    /* Cl tables are indexed by (ic pair) x (cl type grid). */
    for(i=0;i<pma->ic_ic_size;++i){
      for(j=0;j<pma->cltp_grid_size;++j){
        free(pma->cl[i*pma->cltp_grid_size+j]);
        free(pma->ddcl[i*pma->cltp_grid_size+j]);
      }
    }
    free(pma->cl);
    free(pma->ddcl);
  }
  return _SUCCESS_;
}
/**
* Free workspace
*
* @param pma Input: pointer to matter structure
* @param pmw Input: pointer to matter workspace structure
* @return the error status
*/
int matter_workspace_free(struct matters* pma,
struct matters_workspace* pmw){
int idx_pair,idx_l,idx_flat;
/* Prefactor buffers */
free(pmw->pref_real);
free(pmw->pref_imag);
/**
 * Release the spline storage for the f_n^{ij}(t) functions,
 * which only exists when interpolation was requested
 * */
if(pma->uses_intxi_interpolation){
for(idx_pair=0;idx_pair<pma->radtp_grid_size;++idx_pair){
free(pmw->intxi_spline_real[idx_pair]);
free(pmw->intxi_spline_imag[idx_pair]);
free(pmw->ddintxi_spline_real[idx_pair]);
free(pmw->ddintxi_spline_imag[idx_pair]);
}
free(pmw->intxi_spline_real);
free(pmw->intxi_spline_imag);
free(pmw->ddintxi_spline_real);
free(pmw->ddintxi_spline_imag);
}
/* The direct f_n^{ij}(t) storage is always present */
for(idx_pair=0;idx_pair<pma->radtp_grid_size;++idx_pair){
free(pmw->intxi_real[idx_pair]);
free(pmw->intxi_imag[idx_pair]);
}
free(pmw->intxi_real);
free(pmw->intxi_imag);
/**
 * Bessel-window arrays: one row per l, whose columns form a
 * flattened (tilt combination) x (fft coefficient) grid
 * */
for(idx_l=0;idx_l<pma->l_size;++idx_l){
for(idx_flat=0;idx_flat<pma->tilt_grid_size*pma->size_fft_result;++idx_flat){
free(pmw->window_bessel_real[idx_l][idx_flat]);
free(pmw->window_bessel_imag[idx_l][idx_flat]);
}
free(pmw->window_bessel_real[idx_l]);
free(pmw->window_bessel_imag[idx_l]);
}
free(pmw->window_bessel_real);
free(pmw->window_bessel_imag);
return _SUCCESS_;
}
/**
* Allocate workspace
*
* @param pma Input: pointer to matter structure
* @param pmw Input: pointer to matter workspace structure
* @return the error status
*/
int matter_workspace_alloc(struct matters* pma,
struct matters_workspace* pmw){
int index_radtp1_radtp2,index_l,index_tilt_grid,index_coeff;
/**
 * Prefactor allocations
 *
 * Size for the largest time sampling actually in use
 * (unintegrated and/or integrated windows); double the
 * storage when the symmetrized xi integration is used.
 * */
int tw_max_size = 0;
if(pma->has_unintegrated_windows){
tw_max_size = MAX(tw_max_size,pma->tw_size);
}
if(pma->has_integrated_windows){
tw_max_size = MAX(tw_max_size,pma->integrated_tw_size);
}
pmw->tau_max_size = tw_max_size;
if(pma->uses_intxi_symmetrized){
class_alloc(pmw->pref_real,
2*tw_max_size*pma->size_fft_result*sizeof(double),
pma->error_message);
class_alloc(pmw->pref_imag,
2*tw_max_size*pma->size_fft_result*sizeof(double),
pma->error_message);
}
else{
class_alloc(pmw->pref_real,
tw_max_size*pma->size_fft_result*sizeof(double),
pma->error_message);
class_alloc(pmw->pref_imag,
tw_max_size*pma->size_fft_result*sizeof(double),
pma->error_message);
}
/**
 * Now allocate local arrays to store the function f_n^{ij}(t) in
 *
 * These can theoretically become quite big,
 * so we allocate a single one for every window and ic combination
 * */
class_alloc(pmw->intxi_real,
pma->radtp_grid_size*sizeof(double*),
pma->error_message);
class_alloc(pmw->intxi_imag,
pma->radtp_grid_size*sizeof(double*),
pma->error_message);
for(index_radtp1_radtp2=0;index_radtp1_radtp2<pma->radtp_grid_size;++index_radtp1_radtp2){
class_alloc(pmw->intxi_real[index_radtp1_radtp2],
pma->size_fft_result*pma->t_size*sizeof(double),
pma->error_message);
class_alloc(pmw->intxi_imag[index_radtp1_radtp2],
pma->size_fft_result*pma->t_size*sizeof(double),
pma->error_message);
}
/**
 * If we desire interpolation, those arrays also have to be allocated
 *
 * BUGFIX: the per-row allocations below used sizeof(double*) although
 * the rows store doubles (compare the intxi_real rows above). On any
 * platform where pointers are smaller than doubles (e.g. 32-bit) this
 * under-allocated the buffers and the spline writes overflowed them.
 * */
if(pma->uses_intxi_interpolation){
class_alloc(pmw->intxi_spline_real,
pma->radtp_grid_size*sizeof(double*),
pma->error_message);
class_alloc(pmw->intxi_spline_imag,
pma->radtp_grid_size*sizeof(double*),
pma->error_message);
class_alloc(pmw->ddintxi_spline_real,
pma->radtp_grid_size*sizeof(double*),
pma->error_message);
class_alloc(pmw->ddintxi_spline_imag,
pma->radtp_grid_size*sizeof(double*),
pma->error_message);
for(index_radtp1_radtp2=0;index_radtp1_radtp2<pma->radtp_grid_size;++index_radtp1_radtp2){
class_alloc(pmw->intxi_spline_real[index_radtp1_radtp2],
pma->size_fft_result*pma->t_spline_size*sizeof(double),
pma->error_message);
class_alloc(pmw->intxi_spline_imag[index_radtp1_radtp2],
pma->size_fft_result*pma->t_spline_size*sizeof(double),
pma->error_message);
class_alloc(pmw->ddintxi_spline_real[index_radtp1_radtp2],
pma->size_fft_result*pma->t_spline_size*sizeof(double),
pma->error_message);
class_alloc(pmw->ddintxi_spline_imag[index_radtp1_radtp2],
pma->size_fft_result*pma->t_spline_size*sizeof(double),
pma->error_message);
}
}
/**
 * Now allocate bessel arrays:
 * one row per l, columns form a flattened
 * (tilt combination) x (fft coefficient) grid;
 * each cell holds t_size values, doubled when integrated
 * windows require a second half
 * */
class_alloc(pmw->window_bessel_real,
pma->l_size*sizeof(double**),
pma->error_message);
class_alloc(pmw->window_bessel_imag,
pma->l_size*sizeof(double**),
pma->error_message);
for(index_l=0;index_l<pma->l_size;++index_l){
class_alloc(pmw->window_bessel_real[index_l],
pma->tilt_grid_size*pma->size_fft_result*sizeof(double*),
pma->error_message);
class_alloc(pmw->window_bessel_imag[index_l],
pma->tilt_grid_size*pma->size_fft_result*sizeof(double*),
pma->error_message);
for(index_tilt_grid=0;index_tilt_grid<pma->tilt_grid_size;++index_tilt_grid){
for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){
class_alloc(pmw->window_bessel_real[index_l][index_tilt_grid*pma->size_fft_result+index_coeff],
(pma->has_integrated_windows?2:1)*pma->t_size*sizeof(double),
pma->error_message);
class_alloc(pmw->window_bessel_imag[index_l][index_tilt_grid*pma->size_fft_result+index_coeff],
(pma->has_integrated_windows?2:1)*pma->t_size*sizeof(double),
pma->error_message);
}
//End coeff
}
//End tilt grid
}
//End l
return _SUCCESS_;
}
/**
* Allocate the matter vector within the workspace
*
* @param pma Input: pointer to matter structure
* @param pmw Input: pointer to matter workspace structure
* @return the error status
*/
int matter_vector_alloc(struct matters* pma,
struct matters_workspace* pmw){
int n_thread,index_tw;
/* One matter_vector per thread so the Cl integration can run in parallel
 * without sharing scratch buffers */
#ifdef _OPENMP
pmw->N_threads = omp_get_max_threads();
#else
pmw->N_threads = 1;
#endif
class_alloc(pmw->pmv,
pmw->N_threads*sizeof(struct matters_vector*),
pma->error_message);
for(n_thread=0;n_thread<pmw->N_threads;++n_thread){
class_alloc(pmw->pmv[n_thread],
sizeof(struct matters_vector),
pma->error_message);
}
/**
 * First allocate fft coefficient arrays
 * */
for(n_thread=0;n_thread<pmw->N_threads;++n_thread){
/* 2*tau_max_size rows: tau_max_size per window of a window pair */
class_alloc(pmw->pmv[n_thread]->window_fft_real,
2*pmw->tau_max_size*sizeof(double*),
pma->error_message);
class_alloc(pmw->pmv[n_thread]->window_fft_imag,
2*pmw->tau_max_size*sizeof(double*),
pma->error_message);
/* The rows themselves are only needed when the sources do not
 * factorize (non-separable case); matter_vector_free mirrors this */
if(!pma->uses_separability){
for(index_tw=0;index_tw<2*pmw->tau_max_size;++index_tw){
class_alloc(pmw->pmv[n_thread]->window_fft_real[index_tw],
pma->size_fft_result*sizeof(double),
pma->error_message);
class_alloc(pmw->pmv[n_thread]->window_fft_imag[index_tw],
pma->size_fft_result*sizeof(double),
pma->error_message);
}
//End index_tw
}
//Ifend separability
/* Integrand storage, doubled for the symmetrized xi integration
 * (matching the pref_real/pref_imag sizing in matter_workspace_alloc) */
if(pma->uses_intxi_symmetrized){
class_alloc(pmw->pmv[n_thread]->integrand_real,
2*pmw->tau_max_size*pma->size_fft_result*sizeof(double),
pma->error_message);
class_alloc(pmw->pmv[n_thread]->integrand_imag,
2*pmw->tau_max_size*pma->size_fft_result*sizeof(double),
pma->error_message);
}
else{
class_alloc(pmw->pmv[n_thread]->integrand_real,
pmw->tau_max_size*pma->size_fft_result*sizeof(double),
pma->error_message);
class_alloc(pmw->pmv[n_thread]->integrand_imag,
pmw->tau_max_size*pma->size_fft_result*sizeof(double),
pma->error_message);
}
}
return _SUCCESS_;
}
/**
* Free matter vector within the matter workspace
*
* @param pma Input: pointer to matter structure
* @param pmw Input: pointer to matter workspace structure
* @return the error status
*/
int matter_vector_free(struct matters* pma,
struct matters_workspace* pmw){
int thread,idx;
/* Release each per-thread vector together with its buffers */
for(thread=0;thread<pmw->N_threads;++thread){
struct matters_vector* pmv = pmw->pmv[thread];
free(pmv->integrand_real);
free(pmv->integrand_imag);
/* Rows only exist in the non-separable case (see matter_vector_alloc) */
if(!pma->uses_separability){
for(idx=0;idx<2*pmw->tau_max_size;++idx){
free(pmv->window_fft_real[idx]);
free(pmv->window_fft_imag[idx]);
}
}
free(pmv->window_fft_real);
free(pmv->window_fft_imag);
free(pmv);
}
free(pmw->pmv);
return _SUCCESS_;
}
/**
* Spline the final Cl's
*
* @param pma Input: pointer to matter structure
* @return the error status
*/
int matter_spline_cls(
struct matters* pma
){
int idx_grid,idx_pair;
if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS){
printf("Method :: Splining the final Cl's \n");
}
/* One table of second derivatives per Cl-type combination */
class_alloc(pma->ddcl,
pma->cltp_grid_size*sizeof(double*),
pma->error_message);
for(idx_grid=0;idx_grid<pma->cltp_grid_size;++idx_grid){
class_alloc(pma->ddcl[idx_grid],
pma->window_size[idx_grid]*pma->l_size*sizeof(double),
pma->error_message);
/* Spline every window pair's Cl(l) along the l sampling */
for(idx_pair=0;idx_pair<pma->window_size[idx_grid];++idx_pair){
array_spline_table_columns(pma->l_sampling,
pma->l_size,
pma->cl[idx_grid]+idx_pair*pma->l_size,
1,
pma->ddcl[idx_grid]+idx_pair*pma->l_size,
_SPLINE_EST_DERIV_,
pma->error_message);
}
//End window pair
}
//End cl type grid
return _SUCCESS_;
}
/**
* Obtain sampling for the fft coefficients
*
* @param pma Input: pointer to matter structure
* @return the error status
*/
int matter_obtain_coeff_sampling(
struct matters * pma
){
if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS){
printf("Method :: Obtaining FFT coefficient sampling\n");
}
int index_coeff;
int index_tilt1,index_tilt2,index_tilt1_tilt2;
double current_tilt_offset;
/* One real frequency nu_real per unordered tilt pair
 * (tilt_grid_size = tilt_size*(tilt_size+1)/2, symmetric grid) */
class_alloc(pma->nu_real,
pma->tilt_grid_size*sizeof(double),
pma->error_message);
/**
 * The real part of the coefficient
 * depends only on the tilt
 * We have to iterate thus through every
 * possible combination of tilts.
 * */
for(index_tilt1=0;index_tilt1<pma->tilt_size;++index_tilt1){
for(index_tilt2=index_tilt1;index_tilt2<pma->tilt_size;++index_tilt2){
index_tilt1_tilt2 = index_symmetric_matrix(index_tilt1,index_tilt2,pma->tilt_size);
/* The offset depends on how many of the two tilts are "reduced":
 * normal+normal -> 0, normal+reduced -> 2, reduced+reduced -> 4 */
if(matter_is_index(index_tilt1,pma->tilt_index_normal,pma->has_tilt_normal)){
if(matter_is_index(index_tilt2,pma->tilt_index_normal,pma->has_tilt_normal)){
current_tilt_offset = 0.0;
}
else if(matter_is_index(index_tilt2,pma->tilt_index_reduced,pma->has_tilt_reduced)){
current_tilt_offset = 2.0;
}
else{
class_stop(pma->error_message,"Tilt index %i not recognized",index_tilt2);
}
}
else if(matter_is_index(index_tilt1,pma->tilt_index_reduced,pma->has_tilt_reduced)){
if(matter_is_index(index_tilt2,pma->tilt_index_normal,pma->has_tilt_normal)){
current_tilt_offset = 2.0;
}
else if(matter_is_index(index_tilt2,pma->tilt_index_reduced,pma->has_tilt_reduced)){
current_tilt_offset = 4.0;
}
else{
class_stop(pma->error_message,"Tilt index %i not recognized",index_tilt2);
}
}
else{
class_stop(pma->error_message,"Tilt index %i not recognized",index_tilt1);
}
if(pma->matter_verbose > MATTER_VERBOSITY_RANGES){
printf(" -> Found nu_real = %f (offset from bias : %f) \n",pma->bias-current_tilt_offset,current_tilt_offset);
}
pma->nu_real[index_tilt1_tilt2]=pma->bias-current_tilt_offset;
}
//End tilt2
}
//End tilt1
/**
 * The imaginary part of the coefficients
 * depends only on the coefficient index
 * */
class_alloc(pma->nu_imag,
pma->size_fft_result*sizeof(double),
pma->error_message);
for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){
/**
 * The factor of (N-1)/N might at first seem confusing, but it is necessary and mathematically correct:
 *
 * For any FFT, we want factors of exp(2*pi*i*m*n)
 * In our case, the FFT goes over log(k),
 * which was sampled as k_m = k_0 * exp(m/(N-1)*dkap)
 * (where dkap = log(k_max)-log(k_min)
 * Notice the N-1. This is included to let k_(N-1) = k_max
 *
 * However, this (N-1) factor is not the one required by the FFT exponential
 * The (N-1)/N is a sort of correction for this fact
 *
 * For this, let us calculate k_m*k^(nu_imag_n)
 * k_m = k_0 * exp(m/(N-1)*dkap)
 * k^(nu_imag_n) = exp(2*pi*n/dkap *(N-1)/N)
 *
 * =>
 * k_m k^(nu_imag_n) = k_0*exp(2*pi*m*n/N)
 * Which is exactly of the form we want
 *
 * It was very important here, that the N-1 factor should cancel,
 * which is only possible if we include this correction factor here
 * */
pma->nu_imag[index_coeff]=_TWOPI_*(((double)(index_coeff))/(pma->deltalogk))*((double)pma->size_fft_input-1)/((double)pma->size_fft_input);
}
//End coeff
return _SUCCESS_;
}
/**
 * Obtain the sampling of the multipoles l at which the Cl's are computed
 *
 * @param pma Input: pointer to matter structure
 * @return the error status
 */
int matter_obtain_l_sampling(
struct matters * pma
){
if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS){
printf("Method :: Obtaining l sampling\n");
}
int index_l;
int current_l;
int increment;
int l_min = 2;
int l_max = pma->l_lss_max;
/* Guard against a degenerate request: for l_max < l_min the final
 * "append l_max" step below would write one element past the
 * l_max-sized allocation (and an empty l sampling is meaningless) */
class_test(l_max < l_min,
pma->error_message,
"l_lss_max = %d is smaller than the minimal multipole l = %d",
l_max,l_min);
//The smallest stepsize is 1, so we can safely assume the maximum size being l_max
class_alloc(pma->l_sampling,
l_max*sizeof(double),
pma->error_message);
if(!pma->uses_all_l_sampling){
/**
 * This is the normal logarithmic sampling that you will also
 * see in other parts of class, like e.g. the spectra module
 *
 *
 * We start from l = 2 and increase it with a logarithmic step
 * */
index_l = 0;
current_l = l_min;
increment = MAX((int)(current_l * (pow(pma->l_logstep,pma->angular_rescaling)-1.)),1);
pma->l_sampling[index_l]=current_l;
while (((current_l+increment) < l_max) &&
(increment < pma->l_linstep*pma->angular_rescaling)) {
index_l ++;
current_l += increment;
pma->l_sampling[index_l]=current_l;
increment = MAX((int)(current_l * (pow(pma->l_logstep,pma->angular_rescaling)-1.)),1);
}
/**
 * When the logarithmic step becomes larger than some linear step,
 * stick to this linear step until we reach l_max
 * */
increment = MAX((int)(pma->l_linstep*pma->angular_rescaling+0.5),1);
while ((current_l+increment) <= l_max) {
index_l ++;
current_l += increment;
pma->l_sampling[index_l]=current_l;
}
/**
 * The last value has to be set to exactly l_max
 * (Otherwise there would be out-of-bounds problems with splining)
 * Of course we only need to add the additonal
 * value of l_max, if we don't already hit it
 * by accident
 * */
if (current_l != l_max) {
index_l ++;
current_l = l_max;
pma->l_sampling[index_l]=current_l;
}
pma->l_size = index_l+1;
/* Shrink the array to the number of samples actually used */
class_realloc(pma->l_sampling,
pma->l_sampling,
(index_l+1)*sizeof(double),
pma->error_message);
}
else{
/**
 * The l_size is l_max-1,
 * thus index_l is smaller or equal to l_max-2
 * thus index_l+2 is smaller or equal to l_max,
 * as desired
 * */
pma->l_size = l_max-1;
for(index_l=0;index_l<pma->l_size;++index_l){
pma->l_sampling[index_l]=index_l+2;
}
class_realloc(pma->l_sampling,
pma->l_sampling,
pma->l_size*sizeof(double),
pma->error_message);
}
/* When all bessel integrals are stored, the recursion has to cover every
 * integer l up to the largest sampled l; otherwise only the sampled ones */
pma->l_size_recursion = (int)(pma->uses_bessel_storeall?(pma->l_sampling[pma->l_size-1]+1):pma->l_size);
return _SUCCESS_;
}
/**
* Obtain sampling for the t values (the values of relative distance along line of sight)
*
* @param pma Input: pointer to matter structure
* @return the error status
*/
int matter_obtain_t_sampling(struct matters* pma){
/**
 * We want to do a normal integration,
 * if we do not use limber approximation
 * Otherwise we only have the value t=1
 * */
if(!pma->uses_limber_approximation){
class_alloc(pma->t_sampling,
pma->t_size*sizeof(double),
pma->error_message);
class_alloc(pma->t_weights,
pma->t_size*sizeof(double),
pma->error_message);
/* Gauss-Legendre nodes and weights on [0,1] */
class_call(array_weights_gauss_limits(
pma->t_sampling,
pma->t_weights,
0.0,//1e-8 for trapz integration
1.0,
pma->t_size,
gauss_type_legendre,//gauss_type_legendre_half,//gauss_type_trapezoid,//gauss_type_legendre,
pma->error_message),
pma->error_message,
pma->error_message);
}
else{
/* Limber approximation: a single node at t=1 with unit weight */
class_alloc(pma->t_sampling,
sizeof(double),
pma->error_message);
class_alloc(pma->t_weights,
sizeof(double),
pma->error_message);
pma->t_size=1;
pma->t_sampling[0]=1.0;
pma->t_weights[0]=1.0;
}
/**
 * If we want to obtain f_n^{ij}(t) for a subset of
 * t values and spline it for all others,
 * we can create a seperate sampling (t_spline_sampling)
 * Since we never integrate over that sampling,
 * we ingore the weights returned from this method
 *
 * Otherwise t spline and t are the same
 * */
if(pma->uses_intxi_interpolation){
/* Throw-away weight array required by the sampling routine */
double* ignore;
class_alloc(ignore,
pma->t_spline_size*sizeof(double),
pma->error_message);
class_alloc(pma->t_spline_sampling,
pma->t_spline_size*sizeof(double),
pma->error_message);
class_call(array_weights_gauss_limits(
pma->t_spline_sampling,
ignore,
0.0,//+1e-8 ? for trapz integration
1.0,
pma->t_spline_size,
gauss_type_trapezoid,//gauss_type_trapezoid,//gauss_type_legendre,
pma->error_message),
pma->error_message,
pma->error_message);
/* Remap t -> (1-a)*t + a*(2t - t^2): for a>0 this concentrates spline
 * nodes towards t=1 (the map is monotonic on [0,1] and fixes 0 and 1) */
double a = 0.75;//0.75;lens//1.0;//0.5;
int index_t;
for(index_t=0;index_t<pma->t_spline_size;++index_t){
double t = pma->t_spline_sampling[index_t];
pma->t_spline_sampling[index_t]=(1.-a)*t+a*(2.*t-t*t);
}
free(ignore);
}
else{
/* No interpolation: alias the spline sampling to the integration
 * sampling; matter_free frees t_spline_sampling only in the
 * interpolation case, so this alias does not get double-freed */
pma->t_spline_size = pma->t_size;
pma->t_spline_sampling = pma->t_sampling;
}
//Ifend xi interpolation
return _SUCCESS_;
}
/**
* Interpolate a prepared window
*
* @param pma Input: pointer to matter structure
* @param tau Input: value of conformal time
* @param index_ic Input: index of initial condition
* @param index_radtp Input: index of radial type
* @param index_wd Input: index of window type
* @param last Input/Output: last succesful interpolation
* @param derivative_type Input: type of bessel derivative
* @param win_val Output: value of window function
* @return the error status
*/
int matter_get_prepared_window_at(
struct matters* pma,
double tau,
int index_ic,
int index_radtp,
int index_wd,
int* last,
int derivative_type,
double* win_val
){
double a,b,h;
/* Only a single initial condition is handled here: the passed index
 * is overwritten with 0 regardless of the caller's value */
index_ic = 0;
/* derivative_type >= 0: unintegrated windows on ptw_sampling
 *  0: window itself, 1: first derivative, 2: second derivative,
 *  4: combination of window and its derivatives with 1/(tau0-tau) factors,
 *  3: not implemented (stops with an error)
 * derivative_type == -1: integrated windows on ptw_integrated_sampling */
if(derivative_type>=0){
/* Outside the window's tau support the window vanishes */
if(tau<pma->ptw_sampling[index_wd*pma->ptw_size] || tau>pma->ptw_sampling[(index_wd+1)*pma->ptw_size-1]){
*win_val = 0.0;
return _SUCCESS_;
}
/* Locate the bracketing interval and linear-interpolation weights a,b */
class_call(matter_spline_hunt(pma->ptw_sampling+index_wd*pma->ptw_size,
pma->ptw_size,
tau,
last,
&h,
&a,
&b,
pma->error_message),
pma->error_message,
pma->error_message);
int last_index = *last;
if(derivative_type==0){
*win_val = a*pma->ptw_window[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index]
+b*pma->ptw_window[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index+1];
}
if(derivative_type==1){
*win_val = a*pma->ptw_dwindow[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index]
+b*pma->ptw_dwindow[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index+1];
}
if(derivative_type==2){
*win_val = a*pma->ptw_ddwindow[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index]
+b*pma->ptw_ddwindow[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index+1];
}
if(derivative_type==3){
class_stop(pma->error_message,"Currently this is not implemented ...");
/* *win_val = a*(
-pma->ptw_ddwindow[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index]
-2.0/(pma->tau0-tau)*pma->ptw_dwindow[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index]
+(l*(l+1.0)-2.0)*pma->ptw_window[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index]/(pma->tau0-tau)/(pma->tau0-tau)
)
+b*(
-pma->ptw_ddwindow[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index+1]
-2.0/(pma->tau0-tau)*pma->ptw_dwindow[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index+1]
+(l*(l+1.0)-2.0)*pma->ptw_window[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index+1]/(pma->tau0-tau)/(pma->tau0-tau)
);*/
}
if(derivative_type==4){
*win_val = a*(
-pma->ptw_ddwindow[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index]
-2.0/(pma->tau0-tau)*pma->ptw_dwindow[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index]
-2.0*pma->ptw_window[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_size+last_index]/(pma->tau0-tau)/(pma->tau0-tau)
);
}
}
else if(derivative_type==-1){
if(tau<pma->ptw_integrated_sampling[index_wd*pma->ptw_integrated_size] ||
tau>pma->ptw_integrated_sampling[index_wd*pma->ptw_integrated_size+pma->ptw_integrated_size-1]){
*win_val = 0.0;
return _SUCCESS_;
}
class_call(matter_spline_hunt(pma->ptw_integrated_sampling+index_wd*pma->ptw_integrated_size,
pma->ptw_integrated_size,
tau,
last,
&h,
&a,
&b,
pma->error_message),
pma->error_message,
pma->error_message);
int last_index = *last;
/* NOTE(review): this reads ptw_window with stride ptw_integrated_size,
 * while the branches above use stride ptw_size on the same array —
 * correct only if integrated radial types store their windows on the
 * integrated sampling in ptw_window; verify against the preparation code */
*win_val = a*pma->ptw_window[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_integrated_size+last_index]
+b*pma->ptw_window[index_ic*pma->radtp_size_total+index_radtp][index_wd*pma->ptw_integrated_size+last_index+1];
}
return _SUCCESS_;
}
/**
 * Obtain the indices of source types, radial types and Cl types
 * used by the matter module
 *
 * @param pma Input: pointer to matter structure
 * @return the error status
 */
int matter_obtain_indices(
struct matters* pma
){
if(pma->matter_verbose> MATTER_VERBOSITY_FUNCTIONS){
printf("Method :: Obtaining indices (source types and perturbation types) \n");
}
/**
 * Setting flags of which types are included
 *
 * NOTE(review): these flags are currently hard-coded to _TRUE_/_FALSE_,
 * overriding the request flags shown in the trailing comments —
 * presumably a debugging/development state; confirm before release
 * */
pma->has_cltp_nc = _TRUE_;//pma->has_cl_number_count;
pma->has_stp_delta_m = _TRUE_;//pma->has_nc_density;
pma->has_redshift_space_distortion = _FALSE_;//pma->has_nc_rsd;
pma->has_lensing_terms = _FALSE_;//pma->has_nc_lens;
pma->has_gravitational_terms = _FALSE_;//pma->has_nc_gr;
pma->has_doppler_terms = _FALSE_;//pma->has_redshift_space_distortion;
pma->has_cltp_sh = _TRUE_;//pma->has_cl_lensing_potential;
pma->has_cl_shear = _TRUE_;//pma->has_cltp_sh;
/* Derived flags: which window integration schemes and source
 * combinations the later methods have to provide */
pma->has_integrated_windows = (pma->has_cl_shear || pma->has_lensing_terms || pma->has_gravitational_terms);
pma->has_unintegrated_windows = (pma->has_cltp_nc && (pma->has_stp_delta_m || pma->has_redshift_space_distortion || pma->has_gravitational_terms || pma->has_doppler_terms));
pma->has_stp_phi_plus_psi = (pma->has_lensing_terms || pma->has_gravitational_terms || pma->has_cl_shear);
pma->has_window_differentiation = (pma->has_redshift_space_distortion || (pma->uses_density_splitting && pma->has_stp_delta_m));
/**
 * Defining indices of source types
 *
 * Here we explicitly give the correspondence between the types
 * used in the perturbations module and the matter module
 * */
int radtp_size_counter = 0;
/**
 * Defining indices of radial types or radtps
 *
 * These correspond to a combination of
 * 1) the source type
 * 2) the bessel function type
 *
 * When different relations are used,
 * it is possible to combine some of these
 * or instead to split them up
 * */
if(pma->uses_density_splitting){
class_define_index(pma->radtp_dens1, pma->has_stp_delta_m,radtp_size_counter, 1);
class_define_index(pma->radtp_dens2, pma->has_stp_delta_m,radtp_size_counter, 1);
}else{
class_define_index(pma->radtp_dens, pma->has_stp_delta_m,radtp_size_counter, 1);
}
if(pma->uses_rsd_combination){
class_define_index(pma->radtp_rsd_combined, pma->has_redshift_space_distortion,radtp_size_counter, 1);
}
else{
class_define_index(pma->radtp_rsd, pma->has_redshift_space_distortion,radtp_size_counter, 1);
class_define_index(pma->radtp_dop1, pma->has_redshift_space_distortion,radtp_size_counter, 1);
class_define_index(pma->radtp_dop2, pma->has_redshift_space_distortion,radtp_size_counter, 1);
}
class_define_index(pma->radtp_g1, pma->has_gravitational_terms,radtp_size_counter, 1);
class_define_index(pma->radtp_g2, pma->has_gravitational_terms,radtp_size_counter, 1);
class_define_index(pma->radtp_g3, pma->has_gravitational_terms,radtp_size_counter, 1);
class_define_index(pma->radtp_nclens, pma->has_lensing_terms,radtp_size_counter, 1);
class_define_index(pma->radtp_shlens, pma->has_cl_shear,radtp_size_counter, 1);
class_define_index(pma->radtp_g4, pma->has_gravitational_terms,radtp_size_counter, 1);
class_define_index(pma->radtp_g5, pma->has_gravitational_terms,radtp_size_counter, 1);
pma->radtp_size_total = radtp_size_counter;
/* Full (ordered) grid of radial-type pairs */
pma->radtp_grid_size = pma->radtp_size_total*pma->radtp_size_total;
/**
 * Finally we want to count the total number of
 * Cl - types like number-count Cl's (nCl/dCl) or shear Cl's (sCl)
 * Currently only nCl and sCl are supported
 * */
int cltp_size_counter = 0;
class_define_index(pma->cltp_index_nc,pma->has_cltp_nc,cltp_size_counter, 1);
class_define_index(pma->cltp_index_sh,pma->has_cltp_sh,cltp_size_counter, 1);
pma->cltp_size = cltp_size_counter;
/* Symmetric (unordered) grid of Cl-type pairs */
pma->cltp_grid_size = (pma->cltp_size*(pma->cltp_size+1))/2;
if(pma->matter_verbose>MATTER_VERBOSITY_INDICES){
printf(" -> Found requested Cl's :\n");
if(pma->has_cltp_nc){
printf(" -> Number Count Cl's \n");
}
if(pma->has_cltp_sh){
printf(" -> Shear Cl's \n");
}
}
return _SUCCESS_;
}
/**
* Obtain the indices for the bessel integrals
*
* @param pma Input: pointer to matter structure
* @return the error status
*/
int matter_obtain_bi_indices(
struct matters* pma
){
if(pma->matter_verbose >MATTER_VERBOSITY_FUNCTIONS ){
printf("Method :: Obtaining bessel integral and tilt indices \n");
}
/**
* Whereas before we found how the sources relate to each other
* (Cosmological part)
* now we find how the bessel integrals and tilts are related
* (Geometrical part)
* Of course sometimes we don't need to calculate all possible
* tilts of the bessel integrals, and this depends on not only
* our options, but also if the corresponding radial types are
* defined or not
*
* */
int bi_size_counter = 0;
int tilt_size_counter = 0;
int bi_normal_size_counter =0;
int bi_reduced_size_counter=0;
int bi_lfactor_size_nc_counter=0;
int bi_lfactor_size_sh_counter=0;
/**
* Of course checking how many functions and which are really required
* does get a bit tedious at times
* */
pma->has_bitp_normal = _FALSE_;
pma->has_bitp_nu_reduced = _FALSE_;
pma->has_bitp_lfactor = _FALSE_;
if(pma->uses_relative_factors){
if(pma->has_redshift_space_distortion || pma->has_gravitational_terms || pma->has_lensing_terms){
bi_reduced_size_counter=1;
}
}
{
if(pma->uses_density_splitting){
pma->has_bitp_nu_reduced = _TRUE_;
pma->has_bitp_lfactor = _TRUE_;
if(!pma->uses_relative_factors){
bi_reduced_size_counter++;
}
bi_lfactor_size_nc_counter++;
}
else{
pma->has_bitp_normal = _TRUE_;
bi_normal_size_counter++;
}
}
if(pma->has_redshift_space_distortion){
if(!pma->uses_relative_factors){
if(pma->uses_rsd_combination){
bi_reduced_size_counter++;
}
else{
bi_reduced_size_counter+=3;
}
}
pma->has_bitp_nu_reduced = _TRUE_;
}
if(pma->has_lensing_terms){
bi_lfactor_size_nc_counter++;
pma->has_bitp_lfactor = _TRUE_;
}
if(pma->has_cl_shear){
bi_lfactor_size_sh_counter++;
pma->has_bitp_lfactor = _TRUE_;
}
if(pma->has_gravitational_terms){
if(!pma->uses_relative_factors){
bi_reduced_size_counter+=5;
}
else{
bi_reduced_size_counter+=2;
}
pma->has_bitp_nu_reduced= _TRUE_;
}
pma->has_tilt_normal = pma->has_bitp_normal;
pma->has_tilt_reduced = (pma->has_bitp_lfactor || pma->has_bitp_nu_reduced);
/**
* Once we have find which tilts should exist
* and which bessel integral types should exist,
* we now define corresponding indices
* */
// Normal bessel functions
class_define_index(pma->bitp_index_normal, pma->has_bitp_normal,bi_size_counter, 1);
class_define_index(pma->tilt_index_normal, pma->has_tilt_normal,tilt_size_counter, 1);
// Nu-2 and Nu-4 bessel functions
class_define_index(pma->bitp_index_nu_reduced, pma->has_bitp_nu_reduced ,bi_size_counter, 1);
class_define_index(pma->tilt_index_reduced, pma->has_tilt_reduced,tilt_size_counter, 1);
// l(l+1) prefactor of bessel functions
// (does not introduce new tilt)
class_define_index(pma->bitp_index_lfactor, pma->has_bitp_lfactor,bi_size_counter, 1);
pma->bitp_size = bi_size_counter;
pma->tilt_size = tilt_size_counter;
pma->tilt_grid_size = (pma->tilt_size*(pma->tilt_size+1))/2;
/**
* Now we once again build an analogy table,
* saying for each bessel integral types
* which radial types can be found.
*
* We also define a small macro that takes care of
* checking whether all desired indices have been correctly assigned
* This is mostly a macro checking whether or not the function is
* written correctly is changed
* */
if(pma->matter_verbose>MATTER_VERBOSITY_INDICES){
printf(" -> Analysis of radial and bessel integral type structure : \n");
printf(" -> Found number of tilts %i (symmetric grid %i) \n", pma->tilt_size,pma->tilt_grid_size);
printf(" -> Found number of bessel integral types %i \n",pma->bitp_size);
printf(" -> Bitp normal is %sfound (%4d indices)\n",(pma->has_bitp_normal?"":"not "),bi_normal_size_counter);
printf(" -> Bitp reduced is %sfound (%4d indices)\n",(pma->has_bitp_nu_reduced?"":"not "),bi_reduced_size_counter);
printf(" -> Bitp lfactor is %sfound (%4d indices(nc only), %4d indices(sh only))\n",(pma->has_bitp_lfactor?"":"not "),bi_lfactor_size_nc_counter,bi_lfactor_size_sh_counter);
}
class_alloc(pma->radtps_of_bitp,
pma->bitp_size*pma->cltp_size*sizeof(double*),
pma->error_message);
class_alloc(pma->radtp_of_bitp_size,
pma->bitp_size*pma->cltp_size*sizeof(double),
pma->error_message);
#define matter_index_correspondence(store_array,condition,index_in_array_counter,original_index) \
if((condition)){ \
(store_array)[--(index_in_array_counter)] = (original_index); \
}
if(pma->has_bitp_normal){
if(pma->has_cltp_nc){
class_alloc(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_normal],
bi_normal_size_counter*sizeof(double),
pma->error_message);
pma->radtp_of_bitp_size[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_normal] = bi_normal_size_counter;
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_normal],(pma->has_stp_delta_m && (!pma->uses_density_splitting)),bi_normal_size_counter,pma->radtp_dens)
class_test(bi_normal_size_counter!=0,
pma->error_message,
"Number of radial types for bessel integral type 'normal' do not match up.");
}
if(pma->has_cltp_sh){
pma->radtp_of_bitp_size[pma->cltp_index_sh*pma->bitp_size+pma->bitp_index_normal] = 0;
}
}
if(pma->has_bitp_nu_reduced){
if(pma->has_cltp_nc){
class_alloc(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],
bi_reduced_size_counter*sizeof(double),
pma->error_message);
pma->radtp_of_bitp_size[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced] = bi_reduced_size_counter;
if(pma->uses_relative_factors){
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],pma->uses_relative_factors,bi_reduced_size_counter,pma->radtp_combined)
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],pma->has_gravitational_terms,bi_reduced_size_counter,pma->radtp_g4)
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],pma->has_gravitational_terms,bi_reduced_size_counter,pma->radtp_g5)
}
else{
if(pma->uses_rsd_combination){
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],pma->has_redshift_space_distortion,bi_reduced_size_counter,pma->radtp_rsd_combined)
}
else{
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],pma->has_redshift_space_distortion,bi_reduced_size_counter,pma->radtp_rsd)
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],pma->has_redshift_space_distortion,bi_reduced_size_counter,pma->radtp_dop1)
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],pma->has_redshift_space_distortion,bi_reduced_size_counter,pma->radtp_dop2)
}
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],(pma->has_stp_delta_m && pma->uses_density_splitting),bi_reduced_size_counter,pma->radtp_dens1)
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],pma->has_gravitational_terms,bi_reduced_size_counter,pma->radtp_g1)
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],pma->has_gravitational_terms,bi_reduced_size_counter,pma->radtp_g2)
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],pma->has_gravitational_terms,bi_reduced_size_counter,pma->radtp_g3)
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],pma->has_gravitational_terms,bi_reduced_size_counter,pma->radtp_g4)
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_nu_reduced],pma->has_gravitational_terms,bi_reduced_size_counter,pma->radtp_g5)
}
class_test(bi_reduced_size_counter!=0,
pma->error_message,
"Number of radial types for bessel integral type 'reduced' do not match up.");
}
if(pma->has_cltp_sh){
pma->radtp_of_bitp_size[pma->cltp_index_sh*pma->bitp_size+pma->bitp_index_nu_reduced] = 0;
}
}
if(pma->has_bitp_lfactor){
if(pma->has_cltp_nc){
class_alloc(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_lfactor],
bi_lfactor_size_nc_counter*sizeof(double),
pma->error_message);
pma->radtp_of_bitp_size[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_lfactor] = bi_lfactor_size_nc_counter;
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_lfactor],pma->has_lensing_terms,bi_lfactor_size_nc_counter,pma->radtp_nclens)
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_nc*pma->bitp_size+pma->bitp_index_lfactor],(pma->has_stp_delta_m && pma->uses_density_splitting),bi_lfactor_size_nc_counter,pma->radtp_dens2)
class_test(bi_lfactor_size_nc_counter!=0,
pma->error_message,
"Number of radial types for bessel integral type 'lfactor' do not match up.");
}
if(pma->has_cltp_sh){
class_alloc(pma->radtps_of_bitp[pma->cltp_index_sh*pma->bitp_size+pma->bitp_index_lfactor],
bi_lfactor_size_sh_counter*sizeof(double),
pma->error_message);
pma->radtp_of_bitp_size[pma->cltp_index_sh*pma->bitp_size+pma->bitp_index_lfactor] = bi_lfactor_size_sh_counter;
matter_index_correspondence(pma->radtps_of_bitp[pma->cltp_index_sh*pma->bitp_size+pma->bitp_index_lfactor],pma->has_cl_shear,bi_lfactor_size_sh_counter,pma->radtp_shlens)
class_test(bi_lfactor_size_sh_counter!=0,
pma->error_message,
"Number of radial types for bessel integral type 'lfactor' do not match up.");
}
}
/**
* Finally we can print what types were found
* and what bessel integral types they correspond to
* */
if(pma->matter_verbose > MATTER_VERBOSITY_INDICES){
int i,j,k;
for(k=0;k<pma->cltp_size;++k){
if( (!matter_is_index(k,pma->cltp_index_nc,pma->has_cltp_nc))
&&(!matter_is_index(k,pma->cltp_index_sh,pma->has_cltp_sh))
){continue;}
printf(" -> Searching for correspondences at cltp %i \n",k);
for(i=0;i<pma->bitp_size;++i){
for(j=0;j<pma->radtp_of_bitp_size[k*pma->bitp_size+i];++j){
if(pma->uses_relative_factors){
if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_combined,pma->uses_relative_factors)){
if(matter_is_index(i,pma->bitp_index_nu_reduced,pma->has_bitp_nu_reduced)){
printf(" -> Found in bitp 'reduced' index 'combined' \n");
}
}
}
else{
if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_dens,pma->has_stp_delta_m && (!pma->uses_density_splitting))){
if(matter_is_index(i,pma->bitp_index_normal,pma->has_bitp_normal)){
printf(" -> Found in bitp 'normal' index 'density' \n");
}
else if(matter_is_index(i,pma->bitp_index_nu_reduced,pma->has_bitp_nu_reduced)){
printf(" -> Found in bitp 'reduced' index 'density' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_dens1,pma->has_stp_delta_m && pma->uses_density_splitting)){
if(matter_is_index(i,pma->bitp_index_nu_reduced,pma->has_bitp_nu_reduced)){
printf(" -> Found in bitp 'reduced' index 'density (part 1)' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_dens2,pma->has_stp_delta_m && pma->uses_density_splitting)){
if(matter_is_index(i,pma->bitp_index_lfactor,pma->has_bitp_lfactor)){
printf(" -> Found in bitp 'l(l+1) factor' index 'density (part 2)' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_dop1,pma->has_redshift_space_distortion && (!pma->uses_rsd_combination))){
if(matter_is_index(i,pma->bitp_index_nu_reduced,pma->has_bitp_nu_reduced)){
printf(" -> Found in bitp 'reduced' index 'doppler 1' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_dop2,pma->has_redshift_space_distortion && (!pma->uses_rsd_combination))){
if(matter_is_index(i,pma->bitp_index_nu_reduced,pma->has_bitp_nu_reduced)){
printf(" -> Found in bitp 'reduced' index 'doppler 2' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_rsd,pma->has_redshift_space_distortion && (!pma->uses_rsd_combination))){
if(matter_is_index(i,pma->bitp_index_nu_reduced,pma->has_bitp_nu_reduced)){
printf(" -> Found in bitp 'reduced' index 'RSD (dominant term)' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_rsd_combined,pma->has_redshift_space_distortion && (pma->uses_rsd_combination))){
if(matter_is_index(i,pma->bitp_index_nu_reduced,pma->has_bitp_nu_reduced)){
printf(" -> Found in bitp 'reduced' index 'RSD (all combined)' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_g1,pma->has_gravitational_terms)){
if(matter_is_index(i,pma->bitp_index_nu_reduced,pma->has_bitp_nu_reduced)){
printf(" -> Found in bitp 'reduced' index 'gr 1' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_g2,pma->has_gravitational_terms)){
if(matter_is_index(i,pma->bitp_index_nu_reduced,pma->has_bitp_nu_reduced)){
printf(" -> Found in bitp 'reduced' index 'gr 2' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_g3,pma->has_gravitational_terms)){
if(matter_is_index(i,pma->bitp_index_nu_reduced,pma->has_bitp_nu_reduced)){
printf(" -> Found in bitp 'reduced' index 'gr 3' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_g4,pma->has_gravitational_terms)){
if(matter_is_index(i,pma->bitp_index_nu_reduced,pma->has_bitp_nu_reduced)){
printf(" -> Found in bitp 'reduced' index 'gr 4' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_g5,pma->has_gravitational_terms)){
if(matter_is_index(i,pma->bitp_index_nu_reduced,pma->has_bitp_nu_reduced)){
printf(" -> Found in bitp 'reduced' index 'gr 5' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_nclens,pma->has_cltp_nc && pma->has_lensing_terms)){
if(matter_is_index(i,pma->bitp_index_lfactor,pma->has_bitp_lfactor)){
printf(" -> Found in bitp 'l(l+1) factor' index 'number count lensing' \n");
}
}
else if(matter_is_index(pma->radtps_of_bitp[k*pma->bitp_size+i][j],pma->radtp_shlens,pma->has_cl_shear && pma->has_cltp_sh)){
if(matter_is_index(i,pma->bitp_index_lfactor,pma->has_bitp_lfactor)){
printf(" -> Found in bitp 'l(l+1) factor' index 'shear lensing' \n");
}
}
//Ifend select radtp
}
//Ifend uses relative factors
}
//End radtps
}
//End bitp
}
//End cltp
}
return _SUCCESS_;
}
/**
 * Obtain the Bessel integrals using recursion in l,
 * parallelized (OpenMP) over the FFT coefficients
 *
 * @param pma Input: pointer to matter structure
 * @return the error status
 */
int matter_obtain_bessel_recursion_parallel(struct matters* pma){
  /* Compute the bessel integrals I_l(nu,t) for every required
   * (tilt pair, FFT coefficient nu, l, t) combination by recursion in l.
   * Results are stored in pma->bi_real/bi_imag, their sizes in pma->bi_size,
   * and the smallest stored (1-t) per mode in pma->bi_max.
   * The work is parallelized (OpenMP) over the FFT coefficients. */
  if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS ){
    printf("Method :: Obtain bessel integrals from recursion \n");
  }
  //long long TOTAL_ALLOC;
  /* Wall-clock start time for the timing printout at the end */
  #ifdef _OPENMP
  double start_bessel_omp = omp_get_wtime();
  #else
  double start_bessel_omp = 0.0;
  #endif
  int index_tilt1,index_tilt2,index_tilt1_tilt2;
  int index_coeff;
  int index_l,index_l_eval;
  int index_t;
  double y_max;
  double y_min;
  int bessel_recursion_l_size;
  /**
   * The maximum l of recursion that we need to reach
   * is simply given by the last l value in l sampling
   * */
  double bessel_recursion_l_max = pma->l_sampling[pma->l_size-1];
  /**
   * There is a semi-analytical formula for calculating how
   * many l values are required to reach a given accuracy
   * of the bessel integrals.
   * This analytic formula simply explodes for t->1,
   * which we 'catch' by putting an arbitrary, but very very large
   * number here
   * It can happen that actually more than this number of l
   * would be required, giving us higher errors
   * We assume however, that this case is VERY rare
   * */
  int back_complicated_max_size = 25000;
  /**
   * One of the more complicated methods can actually
   * recognize its own failure
   * (It starts a backward recursion, that can be connected
   * to the analytic limit for l=0)
   * This is the allowed deviation from the analytic limit
   * */
  double BI_ALLOWED_ERROR = 1e-6;
  /**
   * Some of the simpler recursion techniques become
   * unsafe for small imaginary parts
   * Here we explicitly switch those off
   * */
  /* -1.0 disables the switch entirely (nu_imag>=0 always passes);
   * the commented 20.0 values are previously used thresholds */
  double NU_IMAG_BI_RECURSION_SWITCH = -1.0;//20.0;//20.0;
  /**
   * We want as many samples as the user requested, plus two additional ones:
   * t = 1.0 and t = 0.0
   * */
  int bi_recursion_t_size = pma->bessel_recursion_t_size+2;
  /**
   * We are going to use a formula of the type
   * 1/(1-t), and rounding that to an integer
   * Of course not all doubles are representable as an integer,
   * so we are going to do a simple cutoff procedure:
   * We are going to use
   * 1/(1-t+inv_maximum_representable_integer)
   * which can be at most
   * 1/inv_maximum_representable_integer
   *
   * Thus, if our maximum representable integer is
   * 2*10^9, we would set the flag to 0.5e-9
   *
   * However, just to be a bit more careful, we
   * choose 1e-5
   * */
  double inv_maximum_representable_integer = 1e-5;
  /* Empirical applicability limits of the 'forward simple' and
   * 'backward simple' recursion methods (see the long comment further down) */
  double nu_min_forward_real = 1.5;
  double l_max_forward_real = 1200;
  double l_max_backward_real = 5000;
  /**
   * Now we can set the y_max and y_min variables
   *
   * The cases t=1.0 and t=0.0 are handled completely seperately anyway,
   * so we just need reasonable limits that are not 'too' far off,
   *
   * Here, we choose the immediate values from
   * (BI_MIN_T up to 1.0-BI_SAMPLING_EPSILON)
   * which is inside the interval
   * (0.0,1.0)
   * */
  double BI_SAMPLING_EPSILON = 1e-8;//1e-8
  double BI_MIN_T = 1e-6;//1e-6
  /* t is sampled through y = -log(1-t); these are the corresponding bounds */
  y_max = -log(BI_SAMPLING_EPSILON);
  y_min = -log(1.0-BI_MIN_T);
  /**
   * If our l is sampled logarithmically,
   * the same can not be done for the recursion relations
   * (which require every single l)
   *
   * As such, we can have a different size here
   * (bessel_recursion_l_size instead of pma->l_size)
   * */
  bessel_recursion_l_size = bessel_recursion_l_max+1;
  pma->l_size_recursion = (pma->uses_bessel_storeall?bessel_recursion_l_size:pma->l_size);
  /**
   * Allocate the arrays in which we want to store the final bessel integrals
   * (Bessel Integrals are shortened to BI)
   * */
  class_alloc(pma->bi_real,
              pma->tilt_grid_size*sizeof(double**),
              pma->error_message);
  class_alloc(pma->bi_imag,
              pma->tilt_grid_size*sizeof(double**),
              pma->error_message);
  class_alloc(pma->bi_size,
              pma->tilt_grid_size*sizeof(int*),
              pma->error_message);
  class_alloc(pma->bi_max,
              pma->tilt_grid_size*sizeof(double*),
              pma->error_message);
  class_alloc(pma->bi_sampling,
              bi_recursion_t_size*sizeof(double),
              pma->error_message);
  /**
   * Define and allocate temporary arrays,
   * which are required for the evaluations
   * */
  /* Loop over the symmetric grid of tilt pairs; only the upper triangle
   * is stored, indexed through index_symmetric_matrix */
  for(index_tilt1=0;index_tilt1<pma->tilt_size;++index_tilt1){
    for(index_tilt2=index_tilt1;index_tilt2<pma->tilt_size;++index_tilt2){
      index_tilt1_tilt2 = index_symmetric_matrix(index_tilt1,index_tilt2,pma->tilt_size);
      class_alloc(pma->bi_real[index_tilt1_tilt2],
                  pma->l_size_recursion*pma->size_fft_result*sizeof(double*),
                  pma->error_message);
      class_alloc(pma->bi_imag[index_tilt1_tilt2],
                  pma->l_size_recursion*pma->size_fft_result*sizeof(double*),
                  pma->error_message);
      class_alloc(pma->bi_size[index_tilt1_tilt2],
                  pma->l_size_recursion*pma->size_fft_result*sizeof(int),
                  pma->error_message);
      class_alloc(pma->bi_max[index_tilt1_tilt2],
                  pma->l_size_recursion*pma->size_fft_result*sizeof(double),
                  pma->error_message);
      /**
       * Allocate the real and imaginary arrays
       * Also allocate the sampling array
       * */
      /* NOTE(review): only the first size_fft_cutoff coefficients are
       * allocated although the arrays are indexed with stride
       * size_fft_result — assumes size_fft_cutoff <= size_fft_result
       * and that only coefficients < size_fft_cutoff are ever accessed. */
      for(index_l=0;index_l<pma->l_size_recursion;++index_l){
        for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
          class_alloc(pma->bi_real[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                      bi_recursion_t_size*sizeof(double),
                      pma->error_message);
          class_alloc(pma->bi_imag[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                      bi_recursion_t_size*sizeof(double),
                      pma->error_message);
        }
      }
      if(pma->matter_verbose > MATTER_VERBOSITY_BESSEL){
        printf(" -> Obtaining recursion starting bessel integrals for tilt %f \n",pma->bias-pma->nu_real[index_tilt1_tilt2]);
      }
      /* abort is set by the class_*_parallel macros on allocation/call
       * failure inside the parallel region */
      int abort = _FALSE_;
      #pragma omp parallel private(index_l,index_t,index_coeff) firstprivate(y_min,y_max,pma,bi_recursion_t_size,bessel_recursion_l_size,back_complicated_max_size,index_tilt1_tilt2)
      {
        /* Per-thread scratch buffers: each thread works on its own set of
         * FFT coefficients, so no synchronization is required on these */
        double* max_t;
        double* abi_real;
        double* abi_imag;
        double* initial_abs;
        class_alloc_parallel(max_t,
                             bessel_recursion_l_size*sizeof(double),
                             pma->error_message);
        class_alloc_parallel(abi_real,
                             (bessel_recursion_l_size+back_complicated_max_size)*sizeof(double),
                             pma->error_message);
        class_alloc_parallel(abi_imag,
                             (bessel_recursion_l_size+back_complicated_max_size)*sizeof(double),
                             pma->error_message);
        class_alloc_parallel(initial_abs,
                             bessel_recursion_l_size*sizeof(double),
                             pma->error_message);
        /* Per-method timing accumulators (only meaningful with _OPENMP) */
        double back_simple_time = 0;
        double for_simple_time = 0;
        double complex_time = 0;
        double inverse_time = 0;
        double taylor_time = 0;
        #pragma omp for schedule(dynamic,(pma->size_fft_result>=CHUNK_SIZE*omp_get_max_threads()?CHUNK_SIZE:1))
        for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
          if(pma->matter_verbose > MATTER_VERBOSITY_BESSEL){
            if(!MATTER_REWRITE_PRINTING){
              printf(" -> Obtaining bessel from nu[%3d/%3d] = %.10e+%.10ej \n",index_coeff,pma->size_fft_result-1,pma->nu_real[index_tilt1_tilt2],pma->nu_imag[index_coeff]);
            }
            else{
              printf("\r -> Obtaining bessel from nu[%3d/%3d] = %.10e+%.10ej             ",index_coeff,pma->size_fft_result-1,pma->nu_real[index_tilt1_tilt2],pma->nu_imag[index_coeff]);
              fflush(stdout);
            }
          }
          /**
           * Set the nu_real and nu_imag parameters
           * (as shorthands for quicker writing)
           * */
          double nu_real = pma->nu_real[index_tilt1_tilt2];
          double nu_imag = pma->nu_imag[index_coeff];
          /**
           * Obtain the initial bessel integrals and enter them already into the final array
           * */
          bessel_integral_recursion_initial_abs(bessel_recursion_l_max,nu_real,nu_imag,abi_real,abi_imag,initial_abs);
          /* Store the t=1 (index 0) values for every kept l */
          for(index_l=0;index_l<pma->l_size_recursion;++index_l){
            index_l_eval = (pma->uses_bessel_storeall?index_l:(int)pma->l_sampling[index_l]);
            /* NOTE(review): bi_sampling[0] is written by every thread with the
             * same constant value — benign as long as all writes agree */
            pma->bi_sampling[0]= 0.0;
            pma->bi_real[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff][0] = abi_real[index_l_eval];
            pma->bi_imag[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff][0] = abi_imag[index_l_eval];
          }
          /**
           * Set the minimum t for which |I_l(nu,t)|<eps |I_l(nu,1)|
           * conservatively to 0, and correct later
           * */
          memset(max_t,0,bessel_recursion_l_size*sizeof(double));
          /**
           * This flag is going to tell us which is the current
           * maximum l for any given t
           * that does not statisfy the exit condition:
           * |I_l(nu,t)|<eps |I_l(nu,1)|
           *
           * The second term is given by initial_abs
           * */
          int l_max_cur = bessel_recursion_l_max;
          /**
           * This flag keeps track of overflows happening during the summation
           * of the hypergeometric functions. If no more overflows occur
           * ( this flag being set to _FALSE_ )
           * then a simplified version of the summation can be used,
           * which does not check for further overflows.
           *
           * Always initialize as _TRUE_ !
           * */
          short overflow_flag = _TRUE_;
          /* March t downward from ~1 (index_t=1) towards BI_MIN_T */
          for(index_t=1;index_t<bi_recursion_t_size;++index_t){
            /**
             * Obtain the t at which we want to sample
             * */
            /* sqrt spacing in y concentrates samples near t=1, where the
             * integrals vary fastest */
            double y = y_max-(y_max-y_min)*sqrt(((double)(index_t-1))/((double)(bi_recursion_t_size-2)));
            double t = 1.0-exp(-y);
            /**
             * Here are some semi-analytical determinations
             * of the range to which each method extends
             *
             * Sadly, these only apply for a bias around ~1.9
             * and for the maximum error of 1e-6
             *
             * Feel free to improve these formuli
             *
             * Forward simple:
             * Limited to t close to 1,
             * the deviation of which we fit,
             * and capped off with a maximum value (that depends on nu_imag)
             *
             * Backward simple:
             * Limited to t close to 1 (surprisingly),
             * but with a much broader range
             * Update: Using the overflow-safe version, which requires the
             * overflow_flag to keep track of overflows during calculation
             *
             * Self inverse taylor:
             * Limited to very very close to 1,
             * (otherwise too slow)
             *
             * Complex:
             * Decides on forward or backward recursion
             * using matrix invertibility criteria,
             * but requires sufficient l to shave off initial errors in the
             * starting conditions
             * */
            double forward_simple_factor_high_real = (nu_imag*nu_imag/10000.0+nu_imag/3.3+4.0);
            double forward_simple_factor_low_real = nu_imag/4.0*0.95;
            double forward_simple_factor = (forward_simple_factor_high_real-forward_simple_factor_low_real)*(nu_real+2.1)/4.0+forward_simple_factor_low_real;
            double forward_l_max_const = 3000.0+nu_imag*40.0;
            int l_max_forward_simple = (int)MIN((forward_simple_factor/(1-t+inv_maximum_representable_integer)),forward_l_max_const);
            double backward_simple_factor = 10.0+nu_imag/5.0;
            int l_max_backward_simple = (int)(backward_simple_factor/(1-t+inv_maximum_representable_integer));
            double self_inverse_taylor_factor = 15.0;
            int l_max_self_inverse_taylor = (int)(self_inverse_taylor_factor/(1-t+inv_maximum_representable_integer));
            int delta_l_required = (int)MIN((12./(1-t)),back_complicated_max_size);
            double backward_simple_lfactor = MAX(1.5-nu_imag/15,0.0);
            /* Method selection: taylor for small t, then self-inverse taylor,
             * forward simple, backward simple, and finally the 'complicated'
             * method as the general fallback */
            if(t<T_MIN_TAYLOR){
              #ifdef _OPENMP
              double func_start_t = omp_get_wtime();
              #else
              double func_start_t = 0.0;
              #endif
              bessel_integral_recursion_taylor(l_max_cur,nu_real,nu_imag,t,max_t,initial_abs,abi_real,abi_imag);
              #ifdef _OPENMP
              taylor_time += omp_get_wtime()-func_start_t;
              #else
              taylor_time +=0.0;
              #endif
            }
            /**
             * We want the hypergeometric series to have only very vew terms
             * The arguments are approximately l^2/4 * (1-z)^2/(1+z)^2 << 1
             * (neglecting any nu dependence)
             * Using z = t^2 = (1-eps)^2 ~ 1-2eps
             * we find (1-z)^2/(1+z)^2 ~ eps^2
             * Then we find (l*eps/2)^2 << 1
             * As such we get eps << 2/l => 1-t = alpha*2/l with alpha<<1
             * We find quick convergence for 1-t = 2*alpha/l < 2*T_MIN_INVERSE_TAYLOR/l_max
             * ( l < l_max , alpha = T_MIN_INVERSE_TAYLOR)
             * */
            else if(l_max_self_inverse_taylor >l_max_cur && t>1.-T_MIN_INVERSE_TAYLOR){
              #ifdef _OPENMP
              double func_start_t = omp_get_wtime();
              #else
              double func_start_t = 0.0;
              #endif
              bessel_integral_recursion_inverse_self(l_max_cur,nu_real,nu_imag,t,abi_real,abi_imag,max_t,initial_abs,pma->error_message);
              #ifdef _OPENMP
              inverse_time += omp_get_wtime()-func_start_t;
              #else
              inverse_time += 0.0;
              #endif
            }
            else if(
              ( nu_imag>NU_IMAG_BI_RECURSION_SWITCH &&
                l_max_forward_simple > l_max_cur )
              || (nu_real>nu_min_forward_real && l_max_cur<l_max_forward_real)
            ){
              #ifdef _OPENMP
              double func_start_t = omp_get_wtime();
              #else
              double func_start_t = 0.0;
              #endif
              class_call_parallel(bessel_integral_recursion_forward_simple(
                                    l_max_cur,
                                    nu_real,
                                    nu_imag,
                                    t,
                                    abi_real,
                                    abi_imag,
                                    max_t,
                                    initial_abs,
                                    pma->error_message),
                                  pma->error_message,
                                  pma->error_message);
              #ifdef _OPENMP
              for_simple_time += omp_get_wtime()-func_start_t;
              #else
              for_simple_time += 0.0;
              #endif
            }
            else if(
              (nu_imag>NU_IMAG_BI_RECURSION_SWITCH &&
               l_max_backward_simple > l_max_cur) ||
              (l_max_cur<l_max_backward_real)
            ){
              #ifdef _OPENMP
              double func_start_t = omp_get_wtime();
              #else
              double func_start_t = 0.0;
              #endif
              /* Backward recursion needs starting values above l_max_cur;
               * (1.1+backward_simple_lfactor) provides that headroom */
              class_call_parallel(bessel_integral_recursion_backward_simple_safe(
                                    l_max_cur,
                                    (1.1+backward_simple_lfactor)*l_max_cur,
                                    nu_real,
                                    nu_imag,
                                    t,
                                    abi_real,
                                    abi_imag,
                                    max_t,
                                    initial_abs,
                                    &overflow_flag,
                                    pma->error_message),
                                  pma->error_message,
                                  pma->error_message);
              #ifdef _OPENMP
              back_simple_time += omp_get_wtime()-func_start_t;
              #else
              back_simple_time += 0.0;
              #endif
            }
            else{
              #ifdef _OPENMP
              double func_start_t = omp_get_wtime();
              #else
              double func_start_t = 0.0;
              #endif
              class_call_parallel(bessel_integral_recursion_complicated(l_max_cur,
                                                                        l_max_cur+delta_l_required-1,
                                                                        nu_real,
                                                                        nu_imag,
                                                                        t,
                                                                        BI_ALLOWED_ERROR,
                                                                        abi_real,
                                                                        abi_imag,
                                                                        max_t,
                                                                        initial_abs,
                                                                        pma->error_message),
                                  pma->error_message,
                                  pma->error_message);
              #ifdef _OPENMP
              complex_time += omp_get_wtime()-func_start_t;
              #else
              complex_time += 0.0;
              #endif
            }
            /**
             * After obtaining the corresponding bessel integrals through recursion
             * for this particular value for t,
             * we need to store them within the storage arrays
             * */
            for(index_l=0;index_l<pma->l_size_recursion;++index_l){
              index_l_eval = (pma->uses_bessel_storeall?index_l:(int)pma->l_sampling[index_l]);
              /* Modes that have already exited are not evolved/stored anymore */
              if(index_l_eval>l_max_cur){continue;}
              pma->bi_sampling[index_t]= 1.0-t;
              pma->bi_real[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff][index_t] = abi_real[index_l_eval];
              pma->bi_imag[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff][index_t] = abi_imag[index_l_eval];
              if(max_t[index_l_eval]>=t){
                /**
                 * If the condition |I_l(nu,t)|<eps*|I_l(nu,1)|
                 * is fulfilled, we do not need to evaluate this mode
                 * for any smaller values of t
                 *
                 * Thus this mode is 'exiting' our evaluation range
                 *
                 * We can also then reallocate the arrays reaching to exactly this point
                 * And also set the size and maximum evaluable point before 0
                 * */
                pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff] = index_t+1;
                pma->bi_max[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff] = 1.0 - t;
                //TODO :: figure out why parallel reallocation leads to segmentation faults
                /*class_realloc_parallel(pma->bi_real[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                            pma->bi_real[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                            pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff]*sizeof(double),
                            pma->error_message);
                class_realloc_parallel(pma->bi_imag[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                            pma->bi_imag[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                            pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff]*sizeof(double),
                            pma->error_message);
                class_realloc_parallel(pma->bi_sampling[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                            pma->bi_sampling[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                            pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff]*sizeof(double),
                            pma->error_message);*/
              }
              //Ifend exit
            }
            //End l
            /**
             * If a mode has 'exited',
             * we need to check what the current
             * maximum mode to still be evolved is
             * */
            index_l=l_max_cur;
            while(index_l>0 && max_t[index_l]>=t){
              index_l--;
              l_max_cur--;
            }
          }
          //End t
          /**
           * The analytic solutions at l=0 and l=1
           * continue to t=1 without reaching their criterion
           * (The l=0 goes towards a constant, the l=1 decreases too slowly to exit in most cases)
           * */
          if(pma->uses_bessel_storeall){
            pma->bi_size[index_tilt1_tilt2][0*pma->size_fft_result+index_coeff] = bi_recursion_t_size;
            pma->bi_max[index_tilt1_tilt2][0*pma->size_fft_result+index_coeff] = 0.0;
            pma->bi_size[index_tilt1_tilt2][1*pma->size_fft_result+index_coeff] = bi_recursion_t_size;
            pma->bi_max[index_tilt1_tilt2][1*pma->size_fft_result+index_coeff] = 0.0;
          }
        }
        //End coeff
        if(MATTER_REWRITE_PRINTING){
          printf("\r                                                                             \n");
        }
        /* Release the per-thread scratch buffers */
        free(initial_abs);
        free(max_t);
        free(abi_real);
        free(abi_imag);
      }
      //End parallel
      if (abort == _TRUE_) return _FAILURE_;
    }
    //End tilt2
  }
  //End tilt1
  /**
   * Delete temporary arrays
   * */
  #ifdef _OPENMP
  double end_bessel_omp = omp_get_wtime();
  #else
  double end_bessel_omp = 0.0;
  #endif
  if(pma->matter_verbose > MATTER_VERBOSITY_TIMING) {
    printf(" -> Obtaining (recursion) Bessel Integrals took %f REAL seconds \n",end_bessel_omp-start_bessel_omp);
  }
  return _SUCCESS_;
}
/**
 * Obtain the nonlinearity factor, replacing the arrays pointed to by
 * fft_real and fft_imag with growth-factor-rescaled coefficients
 *
 * @param pma Input: pointer to matter struct
 * @param fft_real Input/Output: pointer to the fft coefficient array (real part); replaced by a newly allocated array
 * @param fft_imag Input/Output: pointer to the fft coefficient array (imaginary part); replaced by a newly allocated array
 * @return the error status
 */
int matter_obtain_nonseparability(
                      struct matters* pma,
                      double ** fft_real,
                      double ** fft_imag
                      ){
  /* Divide every fft coefficient by the product of the growth factors at
   * the two tau values of its tau pair, storing the result in freshly
   * allocated arrays that replace the originals (which are freed). */
  if(pma->matter_verbose>MATTER_VERBOSITY_FUNCTIONS){
    printf("Method :: Obtain non-scale-invariance factor\n");
  }
  double* fft_coeff_factor_real;
  double* fft_coeff_factor_imag;
  double* fft_coeff_real = *fft_real;
  double* fft_coeff_imag = *fft_imag;
  int index_tau1,index_tau2,index_tau1_tau2,index_coeff;
  double growth_product;
  int index_offset;
  /* NOTE(review): allocation is sized with tau_grid_size+1 rows while the
   * loops below address tau_size*tau_size rows — assumes
   * tau_grid_size+1 >= tau_size*tau_size; verify against the definition
   * of tau_grid_size elsewhere in this file. */
  class_alloc(fft_coeff_factor_real,
              pma->size_fft_input*(pma->tau_grid_size+1)*sizeof(double),
              pma->error_message);
  class_alloc(fft_coeff_factor_imag,
              pma->size_fft_input*(pma->tau_grid_size+1)*sizeof(double),
              pma->error_message);
  for(index_tau1=0;index_tau1<pma->tau_size;++index_tau1){
    for(index_tau2=0;index_tau2<pma->tau_size;++index_tau2){
      index_tau1_tau2 = index_tau2*pma->tau_size+index_tau1;
      /* Hoist loop invariants: the growth-factor product and the flattened
       * row offset are identical for every coefficient of this tau pair.
       * Dividing by the hoisted product gives bitwise-identical results
       * to the per-coefficient computation. */
      growth_product = pma->growth_factor_tau[index_tau1]*pma->growth_factor_tau[index_tau2];
      index_offset = index_tau1_tau2*pma->size_fft_input;
      /* NOTE(review): only the first size_fft_result of the size_fft_input
       * entries per row are written — presumably size_fft_result <=
       * size_fft_input (real-FFT half spectrum); confirm with the FFT setup. */
      for(index_coeff = 0; index_coeff < pma->size_fft_result;++index_coeff){
        fft_coeff_factor_real[index_offset+index_coeff] =
          fft_coeff_real[index_offset+index_coeff]/growth_product;
        fft_coeff_factor_imag[index_offset+index_coeff] =
          fft_coeff_imag[index_offset+index_coeff]/growth_product;
      }
      //End index_coeff
    }
    //End tau 1
  }
  //End tau 2
  /* Hand ownership of the rescaled arrays back to the caller */
  free(fft_coeff_real);
  free(fft_coeff_imag);
  *fft_real = fft_coeff_factor_real;
  *fft_imag = fft_coeff_factor_imag;
  return _SUCCESS_;
}
/**
* Spline the bessel integrals after having recursively found them
*
* @param pma Input: pointer to matter struct
* @return the error status
*/
int matter_spline_bessel_integrals_recursion(
                struct matters * pma
                ) {
  /* Allocate pma->ddbi_real/ddbi_imag (second derivatives of the bessel
   * integrals) mirroring the layout of pma->bi_real/bi_imag, and fill them
   * by cubic-spline fitting each (l, coefficient) mode over pma->bi_sampling.
   * Parallelized (OpenMP) over l. */
  if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS){
    printf("Method :: Spline (recursion) bessel integrals\n");
  }
  /* Wall-clock start time for the timing printout at the end */
  #ifdef _OPENMP
  double spline_start_omp = omp_get_wtime();
  #else
  double spline_start_omp = 0.0;
  #endif
  /**
   * Define indices and allocate arrays
   * */
  int index_coeff;
  int index_l;
  int index_tilt1,index_tilt2,index_tilt1_tilt2;
  class_alloc(pma->ddbi_real,
              pma->tilt_grid_size*sizeof(double**),
              pma->error_message);
  class_alloc(pma->ddbi_imag,
              pma->tilt_grid_size*sizeof(double**),
              pma->error_message);
  /* Loop over the upper triangle of the symmetric tilt-pair grid */
  for(index_tilt1=0;index_tilt1<pma->tilt_size;++index_tilt1){
    for(index_tilt2=index_tilt1;index_tilt2<pma->tilt_size;++index_tilt2){
      index_tilt1_tilt2 = index_symmetric_matrix(index_tilt1,index_tilt2,pma->tilt_size);
      class_alloc(pma->ddbi_real[index_tilt1_tilt2],
                  pma->l_size_recursion*pma->size_fft_result*sizeof(double*),
                  pma->error_message);
      class_alloc(pma->ddbi_imag[index_tilt1_tilt2],
                  pma->l_size_recursion*pma->size_fft_result*sizeof(double*),
                  pma->error_message);
      /* Each mode gets exactly bi_size samples (modes may have exited the
       * recursion early and thus have fewer stored t values) */
      for(index_l=0;index_l<pma->l_size_recursion;++index_l){
        for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
          class_alloc(pma->ddbi_real[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                      pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff]*sizeof(double),
                      pma->error_message);
          class_alloc(pma->ddbi_imag[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                      pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff]*sizeof(double),
                      pma->error_message);
        }
      }
      /* abort is set by class_call_parallel on failure inside the region */
      int abort = _FALSE_;
      #pragma omp parallel private(index_l,index_coeff) firstprivate(pma,index_tilt1_tilt2)
      {
        #pragma omp for
        for(index_l=0;index_l<pma->l_size_recursion;++index_l){
          for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
            /* Spline the real part of this mode over bi_sampling */
            class_call_parallel(array_spline_table_columns(pma->bi_sampling,
                                         pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                                         pma->bi_real[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                                         1,
                                         pma->ddbi_real[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                                         _SPLINE_EST_DERIV_,
                                         pma->error_message),
                     pma->error_message,
                     pma->error_message);
            /* Spline the imaginary part of this mode over bi_sampling */
            class_call_parallel(array_spline_table_columns(pma->bi_sampling,
                                         pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                                         pma->bi_imag[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                                         1,
                                         pma->ddbi_imag[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                                         _SPLINE_EST_DERIV_,
                                         pma->error_message),
                     pma->error_message,
                     pma->error_message);
          }
          //End coeff
        }
        //End l
      }
      if(abort == _TRUE_) {return _FAILURE_;}
    }
    //End tilt2
  }
  //End tilt1
  #ifdef _OPENMP
  double spline_end_omp = omp_get_wtime();
  #else
  double spline_end_omp = 0.0;
  #endif
  if(pma->matter_verbose > MATTER_VERBOSITY_TIMING ){
    printf(" -> Splining bessel integrals (recursion) took %f REAL seconds \n",spline_end_omp-spline_start_omp);
  }
  return _SUCCESS_;
}
/**
* Cubic hermite spline interpolation
*
* @param x_array Input: pointer x array
* @param x_size Input: size x array
* @param array Input: pointer y array
* @param array_splined Input: pointer ddy array
* @param y_size Input: size y array
* @param x Input: x to interpolate at
* @param last_index Input/Output: last index at which x was found
* @param result Output: pointer to y(x) value
* @param errmsg Output: the error message
* @return the error status
*/
int matter_interpolate_spline_growing_hunt(
                      double * x_array,
                      int x_size,
                      double * array, //[index_y*x_size+index_x]
                      double * array_splined,
                      int y_size,
                      double x,
                      int * last_index,
                      double * result,
                      ErrorMsg errmsg
                      ) {
  /* Evaluate y_i(x) for all y_size rows of 'array' by natural cubic-spline
   * interpolation, using 'array_splined' as the table of second derivatives.
   * The bracketing interval is located with matter_spline_hunt, starting
   * from *last_index (efficient for monotonically growing queries). */
  double h,a,b;
  double weight_low,weight_up;
  int index_y;
  int i_low,i_up;
  /* Locate interval and obtain the linear spline weights a (left), b (right)
   * together with the interval width h */
  matter_spline_hunt(x_array,x_size,x,last_index,&h,&a,&b,errmsg);
  i_low = *last_index;
  i_up  = i_low+1;
  /* Cubic correction weights are the same for every row — compute once */
  weight_low = (a*a*a-a);
  weight_up  = (b*b*b-b);
  for(index_y=0;index_y<y_size;++index_y){
    double y_low  = array[i_low+x_size*index_y];
    double y_up   = array[i_up +x_size*index_y];
    double dd_low = array_splined[i_low+x_size*index_y];
    double dd_up  = array_splined[i_up +x_size*index_y];
    /* Standard cubic-spline evaluation formula */
    result[index_y] = a*y_low + b*y_up + (weight_low*dd_low + weight_up*dd_up)*h*h/6.;
  }
  return _SUCCESS_;
}
/**
 * FFTlog the perturbation sources in a parallelized fashion
 *
 * @param pma Input: pointer to matter struct (provides the sampled sources, k sampling, and FFT sizes)
 * @param fft_coeff_real_ptr Output: pointer to the newly allocated fft coefficients (real part)
 * @param fft_coeff_imag_ptr Output: pointer to the newly allocated fft coefficients (imaginary part)
 * @return the error status
 */
int matter_FFTlog_perturbation_sources_parallel(
struct matters * pma,
double ** fft_coeff_real_ptr,
double ** fft_coeff_imag_ptr
) {
/* Output coefficient arrays; allocated below and handed back to the caller
 * through the pointer arguments (the caller owns and frees them). */
double* fft_coeff_real;
double* fft_coeff_imag;
int N_threads;
int index_tau1,index_tau2,index_tau1_tau2;
/* Only the non-separable branch runs one FFT per tau pair and is parallelized,
 * so only then do we need more than one set of scratch buffers/plans. */
#ifdef _OPENMP
if(!pma->uses_separability){
N_threads = omp_get_max_threads();
}
else{
N_threads=1;
}
#else
N_threads = 1;
#endif
int n_thread;
/* Per-thread scratch buffers holding the two real integrands of one FFT call. */
double** integrand1;
double** integrand2;
class_alloc(integrand1,
N_threads*sizeof(double*),
pma->error_message);
class_alloc(integrand2,
N_threads*sizeof(double*),
pma->error_message);
class_alloc(pma->FFT_plan,
N_threads*sizeof(struct FFT_plan*),
pma->error_message);
for(n_thread=0;n_thread<N_threads;++n_thread){
FFT_planner_init(pma->size_fft_input,&(pma->FFT_plan[n_thread]));
class_alloc(integrand1[n_thread],
pma->size_fft_input*sizeof(double),
pma->error_message);
class_alloc(integrand2[n_thread],
pma->size_fft_input*sizeof(double),
pma->error_message);
}
if(pma->uses_separability){
if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS){
printf("Method :: FFT only today\n");
}
int index_stp1,index_stp2,index_stp1_stp2;
int index_ic1,index_ic2,index_ic1_ic2;
int index_coeff;
/**
 * First we allocate the integrands and the fft coefficients
 * */
class_alloc(fft_coeff_real,
2*pma->size_fft_input*sizeof(double),
pma->error_message);
class_alloc(fft_coeff_imag,
2*pma->size_fft_input*sizeof(double),
pma->error_message);
/**
 * Now we iterate over all combinations of sources and do the FFT transformation
 * */
{
{
/* NOTE(review): abort is never set in this branch (no class_call_parallel
 * inside), so the check below is currently a no-op. */
int abort = _FALSE_;
{
{
int tid=0;
/* Separable case: only the source at tau0 (the last tau index) is needed. */
index_tau1=pma->tau_size-1;
index_tau2=pma->tau_size-1;
/**
 * There is a neat trick with FFT transformations for real inputs,
 * we can do two transformations at once.
 * In this case, we should simply ignore the second part,
 * since there is only one tau value (tau0),
 * It was easier to simply set the second integrand to 0 than rewriting
 * the entire FFT functionality
 * */
for(index_coeff=0;index_coeff<pma->size_fft_input;++index_coeff){
integrand1[tid][index_coeff] = pma->sampled_sources[index_coeff*pma->tau_size+index_tau1]
*pma->sampled_sources[index_coeff*pma->tau_size+index_tau2]
*pow(pma->k_sampling[index_coeff]/pma->k_sampling[0],-pma->bias);
}
for(index_coeff=0;index_coeff<pma->size_fft_input;++index_coeff){
integrand2[tid][index_coeff] = 0.0;
}
FFT_real_short_planned(integrand1[tid],
integrand2[tid],
fft_coeff_real,
fft_coeff_imag,
fft_coeff_real+1*pma->size_fft_input,
fft_coeff_imag+1*pma->size_fft_input,
pma->FFT_plan[tid]);
/**
 * The coefficients we have calculated are not yet the final coefficients
 * For these, we have to multiply with k0^(nu_imag)
 * */
for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){
/* Rotate each coefficient by k0^(-bias) * e^{-i*log(k0)*nu_imag}
 * and normalize by the FFT length. */
double exp_factor = pow(pma->k_sampling[0],-pma->bias);
double phase = -log(pma->k_sampling[0])*pma->nu_imag[index_coeff];
double exp_real = exp_factor*cos(phase);
double exp_imag = exp_factor*sin(phase);
double coeff_real = fft_coeff_real[index_coeff];
double coeff_imag = fft_coeff_imag[index_coeff];
double newcoeff_real = coeff_real*exp_real-coeff_imag*exp_imag;
double newcoeff_imag = coeff_real*exp_imag+coeff_imag*exp_real;
fft_coeff_real[index_coeff] = newcoeff_real/pma->size_fft_input;
fft_coeff_imag[index_coeff] = newcoeff_imag/pma->size_fft_input;
}
//end coeffs
}
//End stp2
}
//End stp1
if(abort == _TRUE_){return _FAILURE_;}
}
//end ic2
}
//end ic1
if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS){
printf(" -> Returning from FFTlog... \n");
}
}else{
if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS){
printf("Method :: FFT for every tau combination\n");
}
int index_coeff;
/**
 * Allocate the integrands and the coefficient arrays
 * */
/* The (+1) presumably leaves room for the partner transform written at
 * row index_tau1_tau2+1 by the last tau pair - TODO confirm against
 * the definition of tau_grid_size. */
class_alloc(fft_coeff_real,
pma->size_fft_input*(pma->tau_grid_size+1)*sizeof(double),
pma->error_message);
class_alloc(fft_coeff_imag,
pma->size_fft_input*(pma->tau_grid_size+1)*sizeof(double),
pma->error_message);
/**
 * Now we iterate over all combinations of sources and do the FFT transformation
 * */
{
{
{
{
/* NOTE(review): abort is never set inside this loop either. */
int abort = _FALSE_;
/* index_tau2 advances by 2: each FFT call transforms the two real
 * integrands for tau2 and tau2+1 simultaneously.
 * NOTE(review): this looks like it assumes pma->tau_size is even -
 * otherwise index_tau2+1 indexes one column past the tau range;
 * TODO confirm. */
#pragma omp parallel for collapse(2) private(index_tau1,index_tau2,index_tau1_tau2,index_coeff) firstprivate(fft_coeff_real,fft_coeff_imag)
for(index_tau1=0;index_tau1<pma->tau_size;++index_tau1){
for(index_tau2=0;index_tau2<pma->tau_size;index_tau2+=2){
#ifdef _OPENMP
int tid = omp_get_thread_num();
#else
int tid = 0;
#endif
index_tau1_tau2 = index_tau1*pma->tau_size+index_tau2;
/**
 * There is a neat trick with FFT transformations for real inputs,
 * we can do two transformations at once.
 * */
for(index_coeff=0;index_coeff<pma->size_fft_input;++index_coeff){
integrand1[tid][index_coeff] = pma->sampled_sources[index_coeff*pma->tau_size+index_tau1]
*pma->sampled_sources[index_coeff*pma->tau_size+index_tau2]
*pow(pma->k_sampling[index_coeff]/pma->k_sampling[0],-pma->bias);
}
for(index_coeff=0;index_coeff<pma->size_fft_input;++index_coeff){
integrand2[tid][index_coeff] = pma->sampled_sources[index_coeff*pma->tau_size+index_tau1]
*pma->sampled_sources[index_coeff*pma->tau_size+index_tau2+1]
*pow(pma->k_sampling[index_coeff]/pma->k_sampling[0],-pma->bias);
}
FFT_real_short_planned(integrand1[tid],
integrand2[tid],
fft_coeff_real+index_tau1_tau2*pma->size_fft_input,
fft_coeff_imag+index_tau1_tau2*pma->size_fft_input,
fft_coeff_real+(index_tau1_tau2+1)*pma->size_fft_input,
fft_coeff_imag+(index_tau1_tau2+1)*pma->size_fft_input,
pma->FFT_plan[tid]);
/**
 * The coefficients we have calculated are not yet the final coefficients
 * For these, we have to multiply with k0^(nu_imag)
 * */
for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){
/* Same k0 rotation and 1/N normalization as in the separable branch,
 * applied to both transforms of the (tau2, tau2+1) pair. */
double exp_factor = pow(pma->k_sampling[0],-pma->bias);
double phase = -log(pma->k_sampling[0])*pma->nu_imag[index_coeff];
double exp_real = exp_factor*cos(phase);
double exp_imag = exp_factor*sin(phase);
double coeff_real = fft_coeff_real[index_tau1_tau2*pma->size_fft_input+index_coeff];
double coeff_imag = fft_coeff_imag[index_tau1_tau2*pma->size_fft_input+index_coeff];
double newcoeff_real = coeff_real*exp_real-coeff_imag*exp_imag;
double newcoeff_imag = coeff_real*exp_imag+coeff_imag*exp_real;
fft_coeff_real[index_tau1_tau2*pma->size_fft_input+index_coeff] = newcoeff_real/pma->size_fft_input;
fft_coeff_imag[index_tau1_tau2*pma->size_fft_input+index_coeff] = newcoeff_imag/pma->size_fft_input;
coeff_real = fft_coeff_real[(index_tau1_tau2+1)*pma->size_fft_input+index_coeff];
coeff_imag = fft_coeff_imag[(index_tau1_tau2+1)*pma->size_fft_input+index_coeff];
newcoeff_real = coeff_real*exp_real-coeff_imag*exp_imag;
newcoeff_imag = coeff_real*exp_imag+coeff_imag*exp_real;
fft_coeff_real[(index_tau1_tau2+1)*pma->size_fft_input+index_coeff] = newcoeff_real/pma->size_fft_input;
fft_coeff_imag[(index_tau1_tau2+1)*pma->size_fft_input+index_coeff] = newcoeff_imag/pma->size_fft_input;
}
//end coeffs
}
//End tau2
}
//End tau1
if(abort==_TRUE_){return _FAILURE_;}
}
//End stp1
}
//End stp2
}
//End ic2
}
//End ic1
if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS ){
printf(" -> Returning from FFTlog... \n");
}
}
/* Release the per-thread scratch buffers and FFT plans. */
for(n_thread=0;n_thread<N_threads;++n_thread){
FFT_planner_free(&(pma->FFT_plan[n_thread]));
free(integrand1[n_thread]);
free(integrand2[n_thread]);
}
free(integrand1);
free(integrand2);
free(pma->FFT_plan);
/* Hand the coefficient arrays back to the caller. */
*fft_coeff_real_ptr = fft_coeff_real;
*fft_coeff_imag_ptr = fft_coeff_imag;
return _SUCCESS_;
}
/**
 * Integrate the Cl's
 *
 * @param pma Input: pointer to matter struct
 * @param fft_coeff_real Input: the fft coefficients (real)
 * @param fft_coeff_imag Input: the fft coefficients (imaginary)
 * @return the error status
 */
int matter_integrate_cl(struct matters* pma,
double * fft_coeff_real,
double * fft_coeff_imag){
if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS){
printf("Method :: Integrate Cl's\n");
}
/**
 * Initialize and allocate local variables
 * */
int index_l;
int index_wd1,index_wd2,index_wd1_wd2;
int index_cltp1,index_cltp2,index_cltp1_cltp2;
/* Workspace lives on the stack; pmw is handed down to all helpers. */
struct matters_workspace mw;
struct matters_workspace * pmw = &mw;
pmw->fft_coeff_real = fft_coeff_real;
pmw->fft_coeff_imag = fft_coeff_imag;
/**
 *
 * Now we allocate the fft coefficient arrays
 * we use to interpolate into
 * We also allocate the bessel integral arrays
 * we use to interpolate into
 *
 * */
/* The workspace buffers must fit the larger of the two time samplings. */
int tw_max_size = 0;
if(pma->has_unintegrated_windows){
tw_max_size = MAX(tw_max_size,pma->tw_size);
}
if(pma->has_integrated_windows){
tw_max_size = MAX(tw_max_size,pma->integrated_tw_size);
}
pmw->tau_max_size = tw_max_size;
/**
 * Now allocate global workspace, and thread workspace
 * */
class_call(matter_workspace_alloc(pma,pmw),
pma->error_message,
pma->error_message);
class_call(matter_vector_alloc(pma,pmw),
pma->error_message,
pma->error_message);
class_test(pma->uses_integration != matter_integrate_tw_t && pma->uses_integration != matter_integrate_tw_logt,
pma->error_message,
"tau integration type not recognized. (Neither tw_t, nor tw_logt) ");
/* Precompute the bessel integrals for each l in parallel.
 * NOTE(review): abort is presumably set by the class_call_parallel macro
 * on failure - verify against the macro definition. */
int abort = _FALSE_;
#pragma omp parallel private(index_l) firstprivate(pma,pmw)
{
if(pma->uses_limber_approximation){
#pragma omp for
for(index_l=0;index_l<pma->l_size;++index_l){
class_call_parallel(matter_get_bessel_limber(pma,
index_l,
pmw),
pma->error_message,
pma->error_message);
}
//End l
}
else if(pma->has_integrated_windows){
#pragma omp for
for(index_l=0;index_l<pma->l_size;++index_l){
class_call_parallel(matter_get_bessel_fort_parallel_integrated(
pma,
index_l,
pmw),
pma->error_message,
pma->error_message);
}
//End l
}
//Ifend limber
}
if(abort == _TRUE_) {return _FAILURE_;}
/**
 * Finally, we allocate the Cl's array
 * and iterate through all initial conditions and window functions
 * */
class_alloc(pma->cl,
pma->cltp_grid_size*sizeof(double*),
pma->error_message);
for(index_cltp1=0;index_cltp1<pma->cltp_size;++index_cltp1){
for(index_cltp2=index_cltp1;index_cltp2<pma->cltp_size;++index_cltp2){
index_cltp1_cltp2 = index_symmetric_matrix(index_cltp1,index_cltp2,pma->cltp_size);
{
{
class_alloc(pma->cl[index_cltp1_cltp2],
pma->window_size[index_cltp1_cltp2]*pma->l_size*sizeof(double),
pma->error_message);
/* win_counter enumerates the computed window pairs in storage order;
 * only pairs within [window_index_start,window_index_end] are computed. */
int win_counter = 0;
for(index_wd1=0;index_wd1<pma->num_windows_per_cltp[index_cltp1];++index_wd1){ //[NS]: modified for DESC
for(index_wd2=pma->window_index_start[index_cltp1_cltp2][index_wd1];index_wd2<=pma->window_index_end[index_cltp1_cltp2][index_wd1];++index_wd2){
index_wd1_wd2 = index_symmetric_matrix(index_wd1,index_wd2,pma->num_windows);
if(pma->matter_verbose > MATTER_VERBOSITY_CLCALCULATION){
// printf(" -> Calculating for Window Combination %5d,%5d (%10d/%10d) \n",index_wd1,index_wd2,index_wd1_wd2,(pma->num_windows*(pma->num_windows+1))/2)
printf(" -> Calculating for Window Combination %5d,%5d (%10d/%10d) \n",index_wd1,index_wd2,win_counter,pma->window_size[index_cltp1_cltp2]);
}
/**
 * Save information in global workspace
 * */
pmw->index_wd1 = index_wd1;
pmw->index_wd2 = index_wd2;
pmw->index_cltp1 = index_cltp1;
pmw->index_cltp2 = index_cltp2;
pmw->index_wd1_wd2 = index_symmetric_matrix(pmw->index_wd1,pmw->index_wd2,pma->num_windows);
pmw->index_cltp1_cltp2 = index_symmetric_matrix(pmw->index_cltp1,pmw->index_cltp2,pma->cltp_size);
pmw->window_counter = win_counter;
/**
 * Now integrate the actual integral
 * */
class_call(matter_integrate_each(pma,
pmw),
pma->error_message,
pma->error_message);
win_counter++;
}
//End wd2
}
//End wd1
}
//End ic2
}
//End ic1
}
//End cltp2
}
//End cltp1
/**
 * Finally print the obtained results
 * */
if(pma->matter_verbose > MATTER_VERBOSITY_CLRESULTS){
/**
 * Print the direct C_l's
 * */
printf("RESULTS C_l = \n\n");
printf(" -> l sampling : \n");
for(index_l=0;index_l<pma->l_size;++index_l){
printf("%i,",(int)pma->l_sampling[index_l]);
}
printf("\n");
for(index_cltp1=0;index_cltp1<pma->cltp_size;++index_cltp1){
for(index_cltp2=index_cltp1;index_cltp2<pma->cltp_size;++index_cltp2){
index_cltp1_cltp2 = index_symmetric_matrix(index_cltp1,index_cltp2,pma->cltp_size);
printf(" -> At cltp (%4d,%4d) \n",index_cltp1,index_cltp2);
{
index_wd1_wd2=0;
for(index_wd1=0;index_wd1<pma->num_windows_per_cltp[index_cltp1];++index_wd1){
for(index_wd2=pma->window_index_start[index_cltp1_cltp2][index_wd1];index_wd2<=pma->window_index_end[index_cltp1_cltp2][index_wd1];++index_wd2){
printf(" -> At win (%4d,%4d) ... \n",index_wd1,index_wd2);
printf("%.10e",
pma->cl[index_cltp1_cltp2][index_wd1_wd2*pma->l_size+0]
);
for(index_l=1;index_l<pma->l_size;++index_l){
printf(",%.10e",
pma->cl[index_cltp1_cltp2][index_wd1_wd2*pma->l_size+index_l]
);
}
printf("\n");
//End l
index_wd1_wd2++;
}
//End wd2
}
//End wd1
}
//End icgrid
}
//End cltp2
}
//End cltp1
printf("\n");
/**
 * Now also print the l(l+1)/2pi C_l's
 * */
printf("RESULTS l(l+1)/2pi C_l = \n\n");
printf(" -> l sampling : \n");
for(index_l=0;index_l<pma->l_size;++index_l){
printf("%i,", (int)pma->l_sampling[index_l]);
}
printf("\n");
for(index_cltp1=0;index_cltp1<pma->cltp_size;++index_cltp1){
for(index_cltp2=index_cltp1;index_cltp2<pma->cltp_size;++index_cltp2){
index_cltp1_cltp2 = index_symmetric_matrix(index_cltp1,index_cltp2,pma->cltp_size);
printf(" -> At cltp (%4d,%4d) \n",index_cltp1,index_cltp2);
{
index_wd1_wd2=0;
for(index_wd1=0;index_wd1<pma->num_windows_per_cltp[index_cltp1];++index_wd1){
for(index_wd2=pma->window_index_start[index_cltp1_cltp2][index_wd1];index_wd2<=pma->window_index_end[index_cltp1_cltp2][index_wd1];++index_wd2){
printf(" -> At win (%4d,%4d) ... \n",index_wd1,index_wd2);
printf("%.10e",
pma->l_sampling[0]*(pma->l_sampling[0]+1.0)/(_TWOPI_)*pma->cl[index_cltp1_cltp2][index_wd1_wd2*pma->l_size+0]
);
for(index_l=1;index_l<pma->l_size;++index_l){
printf(",%.10e",
pma->l_sampling[index_l]*(pma->l_sampling[index_l]+1.0)/(_TWOPI_)*pma->cl[index_cltp1_cltp2][index_wd1_wd2*pma->l_size+index_l]
);
}
printf("\n");
//End l
index_wd1_wd2++;
}
//End wd2
}
//End wd1
}
//End icgrid
}
//End cltp2
}
//End cltp1
printf("\n\n");
}
//Ifend Cl printing
/**
 * Finally delete also the temporary arrays
 * for the coefficients and the bessel functions
 * */
class_call(matter_vector_free(pma,pmw),
pma->error_message,
pma->error_message);
/**
 * Finally free workspace again
 * */
class_call(matter_workspace_free(pma,pmw),
pma->error_message,
pma->error_message);
return _SUCCESS_;
}
/**
 * Get the integrand of the cosmological function in t and tau
 * for the t range where 1>2, and the whole tau range
 *
 * @param pma Input: pointer to matter struct
 * @param t Input: current value of t
 * @param index_ic1 Input: index of initial condition 1
 * @param index_ic2 Input: index of initial condition 2
 * @param index_radtp1 Input: index of radial type 1
 * @param index_radtp2 Input: index of radial type 2
 * @param index_stp1_stp2 Input: index of source type combo 1,2
 * @param index_wd1 Input: index of window 1
 * @param index_wd2 Input: index of window 2
 * @param integrand_real Output: the integrand (real part)
 * @param integrand_imag Output: the integrand (imaginary part)
 * @param wint_fft_real Input: temporary array for fft coefficients
 * @param wint_fft_imag Input: temporary array for fft coefficients
 * @param pmw Input: pointer to matter workspace
 * @return the error status
 */
int matter_get_half_integrand(struct matters* pma,
double t,
int index_ic1,
int index_ic2,
int index_radtp1,
int index_radtp2,
int index_stp1_stp2,
int index_wd1,
int index_wd2,
double* integrand_real,
double* integrand_imag,
double** wint_fft_real,
double** wint_fft_imag,
struct matters_workspace* pmw
){
/* NOTE(review): index_ic1_ic2 is read from the workspace but never used below. */
int index_ic1_ic2 = pmw->index_ic1_ic2;
int index_tw_local;
double* fft_real = pmw->fft_coeff_real;
double* fft_imag = pmw->fft_coeff_imag;
double window0_val,window1_val;
int inf0=0,inf1=0;
double x0,x1;
int fft_index00,fft_index01,fft_index10,fft_index11;
int index_coeff;
short x1flag;
double h0,a0,b0;
double h1,a1,b1;
double *fft_00_ptr,*fft_01_ptr,*fft_10_ptr,*fft_11_ptr;
/* Initialize the hunt indices from the first tau point of window 1:
 * inf0 tracks x0 = tau, inf1 tracks the rescaled x1 = tau0-t*(tau0-tau). */
class_call(matter_spline_prepare_hunt(
pma->tau_sampling,
pma->tau_size,
pmw->tau_sampling[index_wd1*pmw->tau_size+0],
&inf0,
pma->error_message),
pma->error_message,
pma->error_message);
class_call(matter_spline_prepare_hunt(
pma->tau_sampling,
pma->tau_size,
pma->tau0*(1-t)+t*pmw->tau_sampling[index_wd1*pmw->tau_size+0],
&inf1,
pma->error_message),
pma->error_message,
pma->error_message);
/* First pass: interpolate the FFT coefficient grid onto (x0,x1) for every
 * local tau point of window 1. */
for(index_tw_local=0;index_tw_local<pmw->tau_size;++index_tw_local){
x1flag = _TRUE_;
if(pma->uses_intxi_logarithmic && pmw->is_integrated_radtp1 && pmw->is_integrated_radtp2){
/* Logarithmic chi sampling: tau0-x0 is stored directly as
 * exp_integrated_tw_sampling, so x1 = tau0-t*(tau0-x0) simplifies. */
x0 = pma->tau0-pma->exp_integrated_tw_sampling[index_wd1*pmw->tau_size+index_tw_local];//exp(pmw->tau_sampling[index_wd1*pmw->tau_size+index_tw_local]);
x1 = pma->tau0-t*pma->exp_integrated_tw_sampling[index_wd1*pmw->tau_size+index_tw_local];// - pma->small_log_offset;
}
else{
x0 = pmw->tau_sampling[index_wd1*pmw->tau_size+index_tw_local];
x1 = pma->tau0-t*(pma->tau0-x0);// - pma->small_log_offset;
}
class_test(x0>pma->tau0,
pma->error_message,
"with x0 = %.10e , t = %.10e ,tau0 = %.10e , x0 = %.10e",x0,t,pma->tau0,x0);
class_test(x1>pma->tau0,
pma->error_message,
"with x0 = %.10e , t = %.10e ,tau0 = %.10e , x1 = %.10e",x0,t,pma->tau0,x1);
if(
(x1>pma->tw_max[index_wd2] && (!(pma->has_integrated_windows && matter_is_integrated(index_radtp2))))
||x1<pma->tw_min[index_wd2]){
//The point x1 is outside of the window w2
x1flag=_FALSE_;
}
/* Locate x0 (and, when inside the window, x1) within the global tau
 * sampling, obtaining interpolation weights a,b and step h. */
class_call(matter_spline_hunt(pma->tau_sampling,
pma->tau_size,
x0,//285+pma->small_log_offset,
&inf0,
&h0,
&a0,
&b0,
pma->error_message),
pma->error_message,
pma->error_message);
if(x1flag==_TRUE_){
class_call(matter_spline_hunt(pma->tau_sampling,
pma->tau_size,
x1,//285+pma->small_log_offset,
&inf1,
&h1,
&a1,
&b1,
pma->error_message),
pma->error_message,
pma->error_message);
}
if(!pma->uses_separability){
if(x1flag==_TRUE_){
/* Bilinear interpolation between the four coefficient rows surrounding
 * (x0,x1) in the tau x tau grid; real part first, then imaginary. */
fft_index00 = (inf0)*pma->tau_size+(inf1);
fft_index01 = (inf0)*pma->tau_size+(inf1+1);
fft_index10 = (inf0+1)*pma->tau_size+(inf1);
fft_index11 = (inf0+1)*pma->tau_size+(inf1+1);
fft_00_ptr = fft_real+fft_index00*pma->size_fft_input;
fft_01_ptr = fft_real+fft_index01*pma->size_fft_input;
fft_10_ptr = fft_real+fft_index10*pma->size_fft_input;
fft_11_ptr = fft_real+fft_index11*pma->size_fft_input;
for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){
wint_fft_real[index_tw_local][index_coeff] = a0*a1*fft_00_ptr[index_coeff]+a0*b1*fft_01_ptr[index_coeff]+b0*a1*fft_10_ptr[index_coeff]+b0*b1*fft_11_ptr[index_coeff];
}
fft_00_ptr = fft_imag+fft_index00*pma->size_fft_input;
fft_01_ptr = fft_imag+fft_index01*pma->size_fft_input;
fft_10_ptr = fft_imag+fft_index10*pma->size_fft_input;
fft_11_ptr = fft_imag+fft_index11*pma->size_fft_input;
for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){
wint_fft_imag[index_tw_local][index_coeff] = a0*(a1*fft_00_ptr[index_coeff]+b1*fft_01_ptr[index_coeff])+b0*(a1*fft_10_ptr[index_coeff]+b1*fft_11_ptr[index_coeff]);
}
}
else{
/* x1 outside window 2: this tau point contributes nothing. */
memset(wint_fft_real[index_tw_local],0,pma->size_fft_result*sizeof(double));
memset(wint_fft_imag[index_tw_local],0,pma->size_fft_result*sizeof(double));
}
//End if x1
}else{
/* Separable case: coefficients are tau-independent, so simply alias
 * the global coefficient arrays for both halves. */
wint_fft_real[index_tw_local]=fft_real;
wint_fft_imag[index_tw_local]=fft_imag;
wint_fft_real[pmw->tau_size+index_tw_local]=fft_real;
wint_fft_imag[pmw->tau_size+index_tw_local]=fft_imag;
}
}
//End tw
/* Second pass: multiply by the two window functions evaluated at x0 and x1. */
int last_index0,last_index1;
class_call(matter_spline_prepare_hunt(
pmw->tau_sampling+index_wd1*pmw->tau_size,
pmw->tau_size,
pmw->tau_sampling[index_wd1*pmw->tau_size],
&last_index0,
pma->error_message),
pma->error_message,
pma->error_message);
class_call(matter_spline_prepare_hunt(
pmw->tau_sampling+index_wd1*pmw->tau_size,
pmw->tau_size,
pma->tau0*(1-t)+t*pmw->tau_sampling[index_wd1*pmw->tau_size],
&last_index1,
pma->error_message),
pma->error_message,
pma->error_message);
int derivative_type1 = 0;
int derivative_type2 = 0;
class_call(matter_get_derivative_type(pma,
&derivative_type1,
&derivative_type2,
index_radtp1,
index_radtp2),
pma->error_message,
pma->error_message);
for(index_tw_local=0;index_tw_local<pmw->tau_size;++index_tw_local){
if(pmw->is_integrated_radtp1 && pmw->is_integrated_radtp2 && pma->uses_intxi_logarithmic){
x0 = pma->tau0-pma->exp_integrated_tw_sampling[index_wd1*pmw->tau_size+index_tw_local];//exp(pmw->tau_sampling[index_wd1*pmw->tau_size+index_tw_local]);
}
else{
x0 = pmw->tau_sampling[index_wd1*pmw->tau_size+index_tw_local];
}
x1 = pma->tau0-t*(pma->tau0-x0);
class_call(matter_get_prepared_window_at(pma,
x0,
index_ic1,
index_radtp1,
index_wd1,
&last_index0,
derivative_type1,
&window0_val),
pma->error_message,
pma->error_message);
class_call(matter_get_prepared_window_at(pma,
x1,
index_ic2,
index_radtp2,
index_wd2,
&last_index1,
derivative_type2,
&window1_val),
pma->error_message,
pma->error_message);
/* Combine the window product with the interpolated coefficients into the
 * final integrand, stored coefficient-major. */
double wwval = window0_val*window1_val;
for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){
integrand_real[index_coeff*pmw->tau_size+index_tw_local] = wwval*wint_fft_real[index_tw_local][index_coeff];
integrand_imag[index_coeff*pmw->tau_size+index_tw_local] = wwval*wint_fft_imag[index_tw_local][index_coeff];
}
//End coeff
}
//End tw
return _SUCCESS_;
}
/**
 * Get the integrand of the cosmological function in t and tau
 * for the whole t and tau ranges
 *
 * @param pma Input: pointer to matter struct
 * @param t Input: current value of t
 * @param index_ic1 Input: index of initial condition 1
 * @param index_ic2 Input: index of initial condition 2
 * @param index_radtp1 Input: index of radial type 1
 * @param index_radtp2 Input: index of radial type 2
 * @param index_stp1_stp2 Input: index of source type combo 1,2
 * @param index_wd1 Input: index of window 1
 * @param index_wd2 Input: index of window 2
 * @param integrand_real Output: the integrand (real part)
 * @param integrand_imag Output: the integrand (imaginary part)
 * @param wint_fft_real Input: temporary array for fft coefficients
 * @param wint_fft_imag Input: temporary array for fft coefficients
 * @param pmw Input: pointer to matter workspace
 * @return the error status
 */
int matter_get_ttau_integrand(struct matters* pma,
double t,
int index_ic1,
int index_ic2,
int index_radtp1,
int index_radtp2,
int index_stp1_stp2,
int index_wd1,
int index_wd2,
double* integrand_real,
double* integrand_imag,
double** wint_fft_real,
double** wint_fft_imag,
struct matters_workspace* pmw
){
int index_tw_local;
double* fft_real = pmw->fft_coeff_real;
double* fft_imag = pmw->fft_coeff_imag;
double window0_val,window1_val,window2_val;
int inf0=0,inf1=0,inf2=0;
double x0,x1,x2;
/* BUGFIX: guard against t==0 BEFORE evaluating log(t) or 1.0/t.
 * Previously log(t) was computed first, so this test came too late
 * to prevent the logarithm of zero it is meant to catch. */
class_test(t==0,
pma->error_message,
"stop to avoid division by zero or logarithm of zero");
/* The second half of the t range is obtained via 1/t; the phase rotation
 * below uses t^(nu_real-2) = exp(logt*(nu_real-2)). */
double logt = log(t);
double exp_factor = exp(logt*(pma->nu_real[pmw->index_tilt1_tilt2]-2.0));
int fft_index00,fft_index01,fft_index10,fft_index11;
int index_coeff;
short x1flag,x2flag;
double h0,a0,b0;
double h1,a1,b1;
double h2,a2,b2;
/* Tables of cos/sin(logt*nu_imag) for the per-coefficient phase rotation. */
double* cos_val;
double* sin_val;
//Theoretically, these could be allocated outside of the t loop,
//but currently their allocation is not time consuming at all.
class_alloc(cos_val,
pma->size_fft_cutoff*sizeof(double),
pma->error_message);
class_alloc(sin_val,
pma->size_fft_cutoff*sizeof(double),
pma->error_message);
for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
double phase = logt*pma->nu_imag[index_coeff];
cos_val[index_coeff] = cos(phase);
sin_val[index_coeff] = sin(phase);
}
double *fft_00_ptr,*fft_01_ptr,*fft_10_ptr,*fft_11_ptr;
/* Initialize the hunt indices from the first tau point of window 1:
 * inf0 tracks x0 = tau, inf1 tracks x1 (scaled by t), inf2 tracks x2 (by 1/t). */
class_call(matter_spline_prepare_hunt(
pma->tau_sampling,
pma->tau_size,
pmw->tau_sampling[index_wd1*pmw->tau_size+0],
&inf0,
pma->error_message),
pma->error_message,
pma->error_message);
class_call(matter_spline_prepare_hunt(
pma->tau_sampling,
pma->tau_size,
pma->tau0*(1-t)+t*pmw->tau_sampling[index_wd1*pmw->tau_size+0],
&inf1,
pma->error_message),
pma->error_message,
pma->error_message);
class_call(matter_spline_prepare_hunt(
pma->tau_sampling,
pma->tau_size,
pma->tau0*(1-1.0/t)+(1.0/t)*pmw->tau_sampling[index_wd1*pmw->tau_size+0],
&inf2,
pma->error_message),
pma->error_message,
pma->error_message);
/* First pass: interpolate the FFT coefficient grid onto (x0,x1) and (x0,x2)
 * for every local tau point of window 1. */
for(index_tw_local=0;index_tw_local<pmw->tau_size;++index_tw_local){
x1flag = _TRUE_;
x2flag = _TRUE_;
if(pma->uses_intxi_logarithmic && pmw->is_integrated_radtp1 && pmw->is_integrated_radtp2){
/* Logarithmic chi sampling: tau0-x0 is stored as exp_integrated_tw_sampling. */
x0 = pma->tau0-pma->exp_integrated_tw_sampling[index_wd1*pmw->tau_size+index_tw_local];
}else{
x0 = pmw->tau_sampling[index_wd1*pmw->tau_size+index_tw_local];
}
x1 = pma->tau0-t*(pma->tau0-x0);// - pma->small_log_offset;
x2 = pma->tau0-(1.0/t)*(pma->tau0-x0);// - pma->small_log_offset;
class_test(x0>pma->tau0,
pma->error_message,
"with x0 = %.10e , t = %.10e ,tau0 = %.10e , x0 = %.10e",x0,t,pma->tau0,x0);
class_test(x1>pma->tau0,
pma->error_message,
"with x0 = %.10e , t = %.10e ,tau0 = %.10e , x1 = %.10e",x0,t,pma->tau0,x1);
class_test(x2>pma->tau0,
pma->error_message,
"with x0 = %.10e , t = %.10e ,tau0 = %.10e , x2 = %.10e",x0,t,pma->tau0,x2);
if(
(x1>pma->tw_max[index_wd2] && (!(pma->has_integrated_windows && pmw->is_integrated_radtp2)))
||x1<pma->tw_min[index_wd2]){
//The point x1 is outside of the window w2
x1flag=_FALSE_;
}
if(
(x2>pma->tw_max[index_wd2] && (!(pma->has_integrated_windows && pmw->is_integrated_radtp2)))
||x2<pma->tw_min[index_wd2]){
//The point x2 is outside of the window w2
x2flag=_FALSE_;
}
/* Locate x0 (and x1/x2 when inside the window) within the global tau
 * sampling, obtaining interpolation weights a,b and step h. */
class_call(matter_spline_hunt(pma->tau_sampling,
pma->tau_size,
x0,
&inf0,
&h0,
&a0,
&b0,
pma->error_message),
pma->error_message,
pma->error_message);
if(x1flag==_TRUE_){
class_call(matter_spline_hunt(pma->tau_sampling,
pma->tau_size,
x1,
&inf1,
&h1,
&a1,
&b1,
pma->error_message),
pma->error_message,
pma->error_message);
}
if(x2flag==_TRUE_){
class_call(matter_spline_hunt(pma->tau_sampling,
pma->tau_size,
x2,
&inf2,
&h2,
&a2,
&b2,
pma->error_message),
pma->error_message,
pma->error_message);
}
if(!pma->uses_separability){
if(x1flag==_TRUE_){
/* Bilinear interpolation at (x0,x1): first slot of wint_fft arrays. */
fft_index00 = (inf0)*pma->tau_size+(inf1);
fft_index01 = (inf0)*pma->tau_size+(inf1+1);
fft_index10 = (inf0+1)*pma->tau_size+(inf1);
fft_index11 = (inf0+1)*pma->tau_size+(inf1+1);
fft_00_ptr = fft_real+fft_index00*pma->size_fft_input;
fft_01_ptr = fft_real+fft_index01*pma->size_fft_input;
fft_10_ptr = fft_real+fft_index10*pma->size_fft_input;
fft_11_ptr = fft_real+fft_index11*pma->size_fft_input;
for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){
wint_fft_real[index_tw_local][index_coeff] = a0*a1*fft_00_ptr[index_coeff]+a0*b1*fft_01_ptr[index_coeff]+b0*a1*fft_10_ptr[index_coeff]+b0*b1*fft_11_ptr[index_coeff];
}
fft_00_ptr = fft_imag+fft_index00*pma->size_fft_input;
fft_01_ptr = fft_imag+fft_index01*pma->size_fft_input;
fft_10_ptr = fft_imag+fft_index10*pma->size_fft_input;
fft_11_ptr = fft_imag+fft_index11*pma->size_fft_input;
for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){
wint_fft_imag[index_tw_local][index_coeff] = a0*(a1*fft_00_ptr[index_coeff]+b1*fft_01_ptr[index_coeff])+b0*(a1*fft_10_ptr[index_coeff]+b1*fft_11_ptr[index_coeff]);
}
}
else{
/* x1 outside window 2: this tau point contributes nothing. */
memset(wint_fft_real[index_tw_local],0,pma->size_fft_result*sizeof(double));
memset(wint_fft_imag[index_tw_local],0,pma->size_fft_result*sizeof(double));
}
//End if x1
if(x2flag==_TRUE_){
/* Bilinear interpolation at (x0,x2): second slot, offset by tau_size. */
fft_index00 = (inf0)*pma->tau_size+(inf2);
fft_index01 = (inf0)*pma->tau_size+(inf2+1);
fft_index10 = (inf0+1)*pma->tau_size+(inf2);
fft_index11 = (inf0+1)*pma->tau_size+(inf2+1);
fft_00_ptr = fft_real+fft_index00*pma->size_fft_input;
fft_01_ptr = fft_real+fft_index01*pma->size_fft_input;
fft_10_ptr = fft_real+fft_index10*pma->size_fft_input;
fft_11_ptr = fft_real+fft_index11*pma->size_fft_input;
for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){
wint_fft_real[index_tw_local+pmw->tau_size][index_coeff] = a0*a2*fft_00_ptr[index_coeff]+a0*b2*fft_01_ptr[index_coeff]+b0*a2*fft_10_ptr[index_coeff]+b0*b2*fft_11_ptr[index_coeff];
}
fft_00_ptr = fft_imag+fft_index00*pma->size_fft_input;
fft_01_ptr = fft_imag+fft_index01*pma->size_fft_input;
fft_10_ptr = fft_imag+fft_index10*pma->size_fft_input;
fft_11_ptr = fft_imag+fft_index11*pma->size_fft_input;
for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){
wint_fft_imag[index_tw_local+pmw->tau_size][index_coeff] = a0*(a2*fft_00_ptr[index_coeff]+b2*fft_01_ptr[index_coeff])+b0*(a2*fft_10_ptr[index_coeff]+b2*fft_11_ptr[index_coeff]);
}
}
else{
/* x2 outside window 2: this tau point contributes nothing. */
memset(wint_fft_real[index_tw_local+pmw->tau_size],0,pma->size_fft_result*sizeof(double));
memset(wint_fft_imag[index_tw_local+pmw->tau_size],0,pma->size_fft_result*sizeof(double));
}
//End if x2
}else{
/* Separable case: coefficients are tau-independent, alias the globals. */
wint_fft_real[index_tw_local]=fft_real;
wint_fft_imag[index_tw_local]=fft_imag;
wint_fft_real[pmw->tau_size+index_tw_local]=fft_real;
wint_fft_imag[pmw->tau_size+index_tw_local]=fft_imag;
}
}
//End tw
/* Second pass: multiply by the window functions evaluated at x0, x1 and x2. */
int last_index0,last_index1,last_index2;
class_call(matter_spline_prepare_hunt(
pmw->tau_sampling+index_wd1*pmw->tau_size,
pmw->tau_size,
pmw->tau_sampling[index_wd1*pmw->tau_size],
&last_index0,
pma->error_message),
pma->error_message,
pma->error_message);
class_call(matter_spline_prepare_hunt(
pmw->tau_sampling+index_wd1*pmw->tau_size,
pmw->tau_size,
pma->tau0*(1-t)+t*pmw->tau_sampling[index_wd1*pmw->tau_size],
&last_index1,
pma->error_message),
pma->error_message,
pma->error_message);
class_call(matter_spline_prepare_hunt(
pmw->tau_sampling+index_wd1*pmw->tau_size,
pmw->tau_size,
pma->tau0*(1-1.0/t)+(1.0/t)*pmw->tau_sampling[index_wd1*pmw->tau_size],
&last_index2,
pma->error_message),
pma->error_message,
pma->error_message);
int derivative_type1 = 0;
int derivative_type2 = 0;
class_call(matter_get_derivative_type(pma,
&derivative_type1,
&derivative_type2,
index_radtp1,
index_radtp2),
pma->error_message,
pma->error_message);
for(index_tw_local=0;index_tw_local<pmw->tau_size;++index_tw_local){
if(pma->uses_intxi_logarithmic && pmw->is_integrated_radtp1 && pmw->is_integrated_radtp2){
x0 = pma->tau0-pma->exp_integrated_tw_sampling[index_wd1*pmw->tau_size+index_tw_local];
}else{
x0 = pmw->tau_sampling[index_wd1*pmw->tau_size+index_tw_local];
}
x1 = pma->tau0*(1-t)+t*x0;
x2 = pma->tau0*(1-1.0/t)+(1.0/t)*x0;
class_call(matter_get_prepared_window_at(pma,
x0,
index_ic1,
index_radtp1,
index_wd1,
&last_index0,
derivative_type1,
&window0_val),
pma->error_message,
pma->error_message);
class_call(matter_get_prepared_window_at(pma,
x1,
index_ic2,
index_radtp2,
index_wd2,
&last_index1,
derivative_type2,
&window1_val),
pma->error_message,
pma->error_message);
class_call(matter_get_prepared_window_at(pma,
x2,
index_ic2,
index_radtp2,
index_wd2,
&last_index2,
derivative_type2,
&window2_val),
pma->error_message,
pma->error_message);
/* Assemble both halves of the t range: the (x0,x1) part directly, and the
 * (x0,x2) part rotated by t^(nu-2) = exp_factor*e^{i*logt*nu_imag}. */
double temp_first,temp_second;
temp_first = window0_val*window1_val;
temp_second = exp_factor*window0_val*window2_val;
for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){ /* NOTE(review): used to run to size_fft_cutoff ("was cutoff?") - kept at size_fft_result */
integrand_real[index_coeff*pmw->tau_size+index_tw_local] = temp_first*wint_fft_real[index_tw_local][index_coeff]+temp_second*(wint_fft_real[index_tw_local+pmw->tau_size][index_coeff]*cos_val[index_coeff]-wint_fft_imag[index_tw_local+pmw->tau_size][index_coeff]*sin_val[index_coeff]);
integrand_imag[index_coeff*pmw->tau_size+index_tw_local] = temp_first*wint_fft_imag[index_tw_local][index_coeff]+temp_second*(wint_fft_imag[index_tw_local+pmw->tau_size][index_coeff]*cos_val[index_coeff]+wint_fft_real[index_tw_local+pmw->tau_size][index_coeff]*sin_val[index_coeff]);
}
//End coeff
}
//End tw
free(cos_val);
free(sin_val);
return _SUCCESS_;
}
/**
* Small helper function specifying the cosmological function
* asymptote as a function of t for two windows in non-integrated
* contributions without derivatives for gaussian windows,
* to possibly improve interpolation accuracy
*
 * @param pma      Input: pointer to matter struct
* @param t Input: current value of t
* @param index_wd1 Input: current index of window 1
* @param index_wd2 Input: current index of window 2
* @param result Output: pointer to output
* @return the error status
*/
int matter_asymptote(struct matters* pma,double t, int index_wd1, int index_wd2,double* result){
/* Comoving distances to the centers of the two windows */
double chi1 = pma->tau0-0.5*(pma->tw_max[index_wd1]+pma->tw_min[index_wd1]);
double chi2 = pma->tau0-0.5*(pma->tw_max[index_wd2]+pma->tw_min[index_wd2]);
/* Gaussian widths recovered from the sigma-cut window extents */
double sig1 = 0.5*(pma->tw_max[index_wd1]-pma->tw_min[index_wd1])/pma->selection_cut_at_sigma;
double sig2 = 0.5*(pma->tw_max[index_wd2]-pma->tw_min[index_wd2])/pma->selection_cut_at_sigma;
/* Offsets and variances of the two Gaussian overlap terms (1<->2 and 2<->1) */
double d12 = chi1*t-chi2;
double d21 = chi2*t-chi1;
double var12 = sig1*sig1*t*t+sig2*sig2;
double var21 = sig2*sig2*t*t+sig1*sig1;
/* Sum of the two symmetric Gaussian asymptotes */
*result = exp(-0.5*d12*d12/var12)+exp(-0.5*d21*d21/var21);
return _SUCCESS_;
}
/**
* Precompute chi^(1-nu) or chi^(2-nu) for normal and logarithmic
* chi integration respectively
*
* dchi = dlog(chi) * chi
*
* @param pma Input: pointer to matter struct
* @param index_wd Input: current window index
* @param pref_real Output: array of prefactors (real part)
* @param pref_imag Output: array of prefactors (imaginary part)
* @param pmw Input: pointer to matter workspace
* @return the error status
*/
int matter_precompute_chit_factors(struct matters* pma,
int index_wd,
double* pref_real,
double* pref_imag,
struct matters_workspace* pmw){
/* Logarithmic chi sampling applies only when both radial types are integrated
 * and the logarithmic-intxi option is on; then tau_sampling holds log(chi)
 * directly and the tilt exponent is (2 - nu_real) instead of (1 - nu_real). */
int use_log_sampling = (pma->uses_intxi_logarithmic && pmw->is_integrated_radtp1 && pmw->is_integrated_radtp2);
int index_tau;
int index_coeff;
for(index_tau=0;index_tau<pmw->tau_size;++index_tau){
double logxi;
double exp_factor;
if(use_log_sampling == _TRUE_){
/* Sampling already stores log(chi) */
logxi = pmw->tau_sampling[index_wd*pmw->tau_size+index_tau];
exp_factor = exp(logxi*(2.0-pma->nu_real[pmw->index_tilt1_tilt2]));
}else{
/* chi = tau0 - tau, take the log explicitly */
logxi = log((pma->tau0-pmw->tau_sampling[index_wd*pmw->tau_size+index_tau]));
exp_factor = exp(logxi*(1.0-pma->nu_real[pmw->index_tilt1_tilt2]));
}
/* Complex prefactor chi^(k - nu) = exp_factor * exp(-i*logxi*nu_imag) */
for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
double phase = -logxi*pma->nu_imag[index_coeff];
pref_real[index_tau*pma->size_fft_result+index_coeff] = exp_factor*cos(phase);
pref_imag[index_tau*pma->size_fft_result+index_coeff] = exp_factor*sin(phase);
}
}
//End tau
return _SUCCESS_;
}
/**
* Integrate the cosmological function f_n^ij(t)
*
 * @param pma      Input: pointer to matter struct
 * @param pmw      Input: pointer to matter workspace
* @return the error status
*/
int matter_integrate_cosmo(struct matters* pma,
struct matters_workspace* pmw
){
/* Shorthands for the current pair of radial types and the tau grid size */
int index_radtp1 = pmw->index_radtp1;
int index_radtp2 = pmw->index_radtp2;
int tw_max_size = pmw->tau_max_size;
double t_min = pmw->t_min;
double t_max = pmw->t_max;
int index_spl;
int index_t,index_coeff,index_tw_local;
double intxi_local_real,intxi_local_imag;
/* Number of t points to evaluate: the coarser spline grid when interpolation
 * is used, otherwise the full t grid */
int t_size_local = (pma->uses_intxi_interpolation?pma->t_spline_size:pma->t_size);
/* NOTE(review): integrate_logarithmically, y_min and y_max are presumably
 * consumed by the matter_get_t macro (defined elsewhere) — confirm */
short integrate_logarithmically = (pma->uses_integration == matter_integrate_tw_logt);
double t;
double* int_real;
double* int_imag;
double** window_fft_real;
double** window_fft_imag;
double y_min,y_max;
/* Substitution y = -log(1-t) used for logarithmic t integration */
y_min = -log(1-t_min);
y_max = -log(1-t_max);
/* Precompute the chi^(1-nu) (or chi^(2-nu)) prefactors for window 1 */
class_call(matter_precompute_chit_factors(pma,
pmw->index_wd1,
pmw->pref_real,
pmw->pref_imag,
pmw),
pma->error_message,
pma->error_message);
/* In the symmetrized integrated case window 2 needs its own prefactors,
 * stored behind those of window 1 at offset tau_max_size*size_fft_result */
if(pma->uses_intxi_symmetrized && pmw->is_integrated_radtp1 && pmw->is_integrated_radtp2){
class_call(matter_precompute_chit_factors(pma,
pmw->index_wd2,
pmw->pref_real+pmw->tau_max_size*pma->size_fft_result,
pmw->pref_imag+pmw->tau_max_size*pma->size_fft_result,
pmw),
pma->error_message,
pma->error_message);
}
int abort = _FALSE_;
#pragma omp parallel private(index_t,index_coeff,index_tw_local,t,int_real,int_imag,window_fft_real,window_fft_imag) firstprivate(pma,pmw,t_size_local)
{
#ifdef _OPENMP
int tid = omp_get_thread_num();
#else
int tid = 0;
#endif
/* Per-thread scratch buffers taken from the thread's workspace vector */
int_real = pmw->pmv[tid]->integrand_real;
int_imag = pmw->pmv[tid]->integrand_imag;
window_fft_real = pmw->pmv[tid]->window_fft_real;
window_fft_imag = pmw->pmv[tid]->window_fft_imag;
/**
 * Now obtain the f_n^{ij}(t) function
 * */
/* Case 1: logarithmic AND symmetrized integrated contributions.
 * Two half integrands (1<->2 swapped) are computed and both tau sums
 * are accumulated, the second from buffers offset by
 * tw_max_size*size_fft_result. */
if(pma->uses_intxi_logarithmic && pma->uses_intxi_symmetrized && pmw->is_integrated_radtp1 && pmw->is_integrated_radtp2){
/**
 * Now obtain the f_n^{ij}(t) function
 * */
#pragma omp for
for(index_t=0;index_t<t_size_local;++index_t){
matter_get_t(index_t)
class_call_parallel(matter_get_half_integrand(pma,
t,
pmw->index_ic1,
pmw->index_ic2,
index_radtp1,
index_radtp2,
pmw->index_stp1_stp2,
pmw->index_wd1,
pmw->index_wd2,
int_real,
int_imag,
window_fft_real,
window_fft_imag,
pmw),
pma->error_message,
pma->error_message);
class_call_parallel(matter_get_half_integrand(pma,
t,
pmw->index_ic2,
pmw->index_ic1,
index_radtp2,
index_radtp1,
pmw->index_stp2_stp1,
pmw->index_wd2,
pmw->index_wd1,
int_real+tw_max_size*pma->size_fft_result,
int_imag+tw_max_size*pma->size_fft_result,
window_fft_real,
window_fft_imag,
pmw),
pma->error_message,
pma->error_message);
/* Complex tau integration: (pref_real+i*pref_imag)*(int_real+i*int_imag)
 * weighted by the tau quadrature weights of each window */
for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
double sum_real =0.0;
double sum_imag =0.0;
for(index_tw_local=0;index_tw_local<pmw->tau_size;++index_tw_local){
sum_real+=pmw->tau_weights[pmw->index_wd1*pmw->tau_size+index_tw_local]*(pmw->pref_real[index_tw_local*pma->size_fft_result+index_coeff]*int_real[index_coeff*pmw->tau_size+index_tw_local]-pmw->pref_imag[index_tw_local*pma->size_fft_result+index_coeff]*int_imag[index_coeff*pmw->tau_size+index_tw_local]);
sum_imag+=pmw->tau_weights[pmw->index_wd1*pmw->tau_size+index_tw_local]*(pmw->pref_real[index_tw_local*pma->size_fft_result+index_coeff]*int_imag[index_coeff*pmw->tau_size+index_tw_local]+pmw->pref_imag[index_tw_local*pma->size_fft_result+index_coeff]*int_real[index_coeff*pmw->tau_size+index_tw_local]);
}
for(index_tw_local=0;index_tw_local<pmw->tau_size;++index_tw_local){
sum_real+=pmw->tau_weights[pmw->index_wd2*pmw->tau_size+index_tw_local]*(pmw->pref_real[tw_max_size*pma->size_fft_result+index_tw_local*pma->size_fft_result+index_coeff]*int_real[index_coeff*pmw->tau_size+index_tw_local+tw_max_size*pma->size_fft_result]-pmw->pref_imag[tw_max_size*pma->size_fft_result+index_tw_local*pma->size_fft_result+index_coeff]*int_imag[index_coeff*pmw->tau_size+index_tw_local+tw_max_size*pma->size_fft_result]);
sum_imag+=pmw->tau_weights[pmw->index_wd2*pmw->tau_size+index_tw_local]*(pmw->pref_real[tw_max_size*pma->size_fft_result+index_tw_local*pma->size_fft_result+index_coeff]*int_imag[index_coeff*pmw->tau_size+index_tw_local+tw_max_size*pma->size_fft_result]+pmw->pref_imag[tw_max_size*pma->size_fft_result+index_tw_local*pma->size_fft_result+index_coeff]*int_real[index_coeff*pmw->tau_size+index_tw_local+tw_max_size*pma->size_fft_result]);
}
//End tw integration
/* Store f_n(t) either on the spline grid or on the full t grid */
if(pma->uses_intxi_interpolation){
pmw->intxi_spline_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_t] = sum_real;
pmw->intxi_spline_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_t] = sum_imag;
}
else{
pmw->intxi_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t] = sum_real;
pmw->intxi_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t] = sum_imag;
}
//Ifend isinterpolated
}
//End coeff
}
//End t
}
/* Case 2: logarithmic integrated contributions without symmetrization:
 * a single full (t,tau) integrand, single tau sum */
else if(pma->uses_intxi_logarithmic && pmw->is_integrated_radtp1 && pmw->is_integrated_radtp2){
#pragma omp for
for(index_t=0;index_t<t_size_local;++index_t){
matter_get_t(index_t)
class_call_parallel(matter_get_ttau_integrand(pma,
t,
pmw->index_ic1,
pmw->index_ic2,
index_radtp1,
index_radtp2,
pmw->index_stp1_stp2,
pmw->index_wd1,
pmw->index_wd2,
int_real,
int_imag,
window_fft_real,
window_fft_imag,
pmw),
pma->error_message,
pma->error_message);
for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
double sum_real =0.0;
double sum_imag =0.0;
for(index_tw_local=0;index_tw_local<pmw->tau_size;++index_tw_local){
sum_real+=pmw->tau_weights[pmw->index_wd1*pmw->tau_size+index_tw_local]*(pmw->pref_real[index_tw_local*pma->size_fft_result+index_coeff]*int_real[index_coeff*pmw->tau_size+index_tw_local]-pmw->pref_imag[index_tw_local*pma->size_fft_result+index_coeff]*int_imag[index_coeff*pmw->tau_size+index_tw_local]);
sum_imag+=pmw->tau_weights[pmw->index_wd1*pmw->tau_size+index_tw_local]*(pmw->pref_real[index_tw_local*pma->size_fft_result+index_coeff]*int_imag[index_coeff*pmw->tau_size+index_tw_local]+pmw->pref_imag[index_tw_local*pma->size_fft_result+index_coeff]*int_real[index_coeff*pmw->tau_size+index_tw_local]);
}
//End tw integration
if(pma->uses_intxi_interpolation){
pmw->intxi_spline_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_t] = sum_real;
pmw->intxi_spline_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_t] = sum_imag;
}
else{
pmw->intxi_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t] = sum_real;
pmw->intxi_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t] = sum_imag;
}
//Ifend isinterpolated
}
//End coeff
}
//End t
}
/* Case 3: symmetrized (non-logarithmic) integrated contributions:
 * same two-half-integrand structure as case 1 */
else if(pma->uses_intxi_symmetrized && pmw->is_integrated_radtp1 && pmw->is_integrated_radtp2){
/**
 * Now obtain the f_n^{ij}(t) function
 * */
#pragma omp for
for(index_t=0;index_t<t_size_local;++index_t){
matter_get_t(index_t)
class_call_parallel(matter_get_half_integrand(pma,
t,
pmw->index_ic1,
pmw->index_ic2,
index_radtp1,
index_radtp2,
pmw->index_stp1_stp2,
pmw->index_wd1,
pmw->index_wd2,
int_real,
int_imag,
window_fft_real,
window_fft_imag,
pmw),
pma->error_message,
pma->error_message);
class_call_parallel(matter_get_half_integrand(pma,
t,
pmw->index_ic2,
pmw->index_ic1,
index_radtp2,
index_radtp1,
pmw->index_stp2_stp1,
pmw->index_wd2,
pmw->index_wd1,
int_real+tw_max_size*pma->size_fft_result,
int_imag+tw_max_size*pma->size_fft_result,
window_fft_real,
window_fft_imag,
pmw),
pma->error_message,
pma->error_message);
for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
double sum_real =0.0;
double sum_imag =0.0;
for(index_tw_local=0;index_tw_local<pmw->tau_size;++index_tw_local){
sum_real+=pmw->tau_weights[pmw->index_wd1*pmw->tau_size+index_tw_local]*(pmw->pref_real[index_tw_local*pma->size_fft_result+index_coeff]*int_real[index_coeff*pmw->tau_size+index_tw_local]-pmw->pref_imag[index_tw_local*pma->size_fft_result+index_coeff]*int_imag[index_coeff*pmw->tau_size+index_tw_local]);
sum_imag+=pmw->tau_weights[pmw->index_wd1*pmw->tau_size+index_tw_local]*(pmw->pref_real[index_tw_local*pma->size_fft_result+index_coeff]*int_imag[index_coeff*pmw->tau_size+index_tw_local]+pmw->pref_imag[index_tw_local*pma->size_fft_result+index_coeff]*int_real[index_coeff*pmw->tau_size+index_tw_local]);
}
for(index_tw_local=0;index_tw_local<pmw->tau_size;++index_tw_local){
sum_real+=pmw->tau_weights[pmw->index_wd2*pmw->tau_size+index_tw_local]*(pmw->pref_real[tw_max_size*pma->size_fft_result+index_tw_local*pma->size_fft_result+index_coeff]*int_real[index_coeff*pmw->tau_size+index_tw_local+tw_max_size*pma->size_fft_result]-pmw->pref_imag[tw_max_size*pma->size_fft_result+index_tw_local*pma->size_fft_result+index_coeff]*int_imag[index_coeff*pmw->tau_size+index_tw_local+tw_max_size*pma->size_fft_result]);
sum_imag+=pmw->tau_weights[pmw->index_wd2*pmw->tau_size+index_tw_local]*(pmw->pref_real[tw_max_size*pma->size_fft_result+index_tw_local*pma->size_fft_result+index_coeff]*int_imag[index_coeff*pmw->tau_size+index_tw_local+tw_max_size*pma->size_fft_result]+pmw->pref_imag[tw_max_size*pma->size_fft_result+index_tw_local*pma->size_fft_result+index_coeff]*int_real[index_coeff*pmw->tau_size+index_tw_local+tw_max_size*pma->size_fft_result]);
}
//End tw integration
if(pma->uses_intxi_interpolation){
pmw->intxi_spline_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_t] = sum_real;
pmw->intxi_spline_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_t] = sum_imag;
}
else{
pmw->intxi_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t] = sum_real;
pmw->intxi_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t] = sum_imag;
}
//Ifend isinterpolated
}
//End coeff
}
//End t
}
/* Case 4 (default): non-integrated or plain contributions:
 * single full (t,tau) integrand, single tau sum */
else{
#pragma omp for
for(index_t=0;index_t<t_size_local;++index_t){
matter_get_t(index_t)
class_call_parallel(matter_get_ttau_integrand(pma,
t,
pmw->index_ic1,
pmw->index_ic2,
index_radtp1,
index_radtp2,
pmw->index_stp1_stp2,
pmw->index_wd1,
pmw->index_wd2,
int_real,
int_imag,
window_fft_real,
window_fft_imag,
pmw),
pma->error_message,
pma->error_message);
for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
double sum_real =0.0;
double sum_imag =0.0;
for(index_tw_local=0;index_tw_local<pmw->tau_size;++index_tw_local){
sum_real+=pmw->tau_weights[pmw->index_wd1*pmw->tau_size+index_tw_local]*(pmw->pref_real[index_tw_local*pma->size_fft_result+index_coeff]*int_real[index_coeff*pmw->tau_size+index_tw_local]-pmw->pref_imag[index_tw_local*pma->size_fft_result+index_coeff]*int_imag[index_coeff*pmw->tau_size+index_tw_local]);
sum_imag+=pmw->tau_weights[pmw->index_wd1*pmw->tau_size+index_tw_local]*(pmw->pref_real[index_tw_local*pma->size_fft_result+index_coeff]*int_imag[index_coeff*pmw->tau_size+index_tw_local]+pmw->pref_imag[index_tw_local*pma->size_fft_result+index_coeff]*int_real[index_coeff*pmw->tau_size+index_tw_local]);
}
//End tw integration
if(pma->uses_intxi_interpolation){
pmw->intxi_spline_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_t] = sum_real;
pmw->intxi_spline_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_t] = sum_imag;
}
else{
pmw->intxi_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t] = sum_real;
pmw->intxi_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t] = sum_imag;
}
//Ifend isinterpolated
}
//End coeff
}
//End t
}
//Ifend
}
if(abort == _TRUE_){return _FAILURE_;}
//End parallel
/* Interpolation stage: spline the coarse-grid f_n(t) and evaluate it on the
 * full t grid, optionally dividing/multiplying by the Gaussian asymptote
 * to flatten the function before splining */
if(pma->uses_intxi_interpolation){
if(pma->uses_intxi_asymptotic && !(pmw->is_integrated_radtp1 || pmw->is_integrated_radtp2)){
for(index_t=0;index_t<t_size_local;++index_t){
matter_get_t(index_t)
double temp;
class_call(matter_asymptote(pma, t, pmw->index_wd1, pmw->index_wd2, &temp),pma->error_message,pma->error_message);
/* NOTE(review): only the real part is divided by the asymptote here,
 * while both are multiplied back below — confirm intent */
for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
pmw->intxi_spline_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_t] /= temp;
}
//End coeff
}
//End t
}
//Ifend asymptotic
/**
 * If we want spline interpolation,
 * we first have to calculate the splines,
 * and then we interpolate said splines
 * */
for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
class_call(array_spline_table_columns(pma->t_spline_sampling,
pma->t_spline_size,
pmw->intxi_spline_real[index_radtp1*pma->radtp_size_total+index_radtp2]+index_coeff*pma->t_spline_size,
1,
pmw->ddintxi_spline_real[index_radtp1*pma->radtp_size_total+index_radtp2]+index_coeff*pma->t_spline_size,
_SPLINE_EST_DERIV_,
pma->error_message),
pma->error_message,
pma->error_message);
class_call(array_spline_table_columns(pma->t_spline_sampling,
pma->t_spline_size,
pmw->intxi_spline_imag[index_radtp1*pma->radtp_size_total+index_radtp2]+index_coeff*pma->t_spline_size,
1,
pmw->ddintxi_spline_imag[index_radtp1*pma->radtp_size_total+index_radtp2]+index_coeff*pma->t_spline_size,
_SPLINE_EST_DERIV_,
pma->error_message),
pma->error_message,
pma->error_message);
index_spl = 0;
for(index_t=0;index_t<pma->t_size;++index_t){
matter_get_t_orig(index_t);
double a,b,h;
/* Locate the bracketing spline interval; index_spl carries over between
 * iterations as the hunt starting point */
class_call(matter_spline_hunt(pma->t_spline_sampling,
pma->t_spline_size,
pma->t_sampling[index_t],
&index_spl,
&h,
&a,
&b,
pma->error_message),
pma->error_message,
pma->error_message);
/* Standard cubic spline evaluation with coefficients a,b and step h */
intxi_local_real = b*pmw->intxi_spline_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_spl+1]
+a*pmw->intxi_spline_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_spl]
+(
(b*b*b-b)*pmw->ddintxi_spline_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_spl+1]+
(a*a*a-a)*pmw->ddintxi_spline_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_spl]
)*h*h/6.;
intxi_local_imag = b*pmw->intxi_spline_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_spl+1]
+a*pmw->intxi_spline_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_spl]
+(
(b*b*b-b)*pmw->ddintxi_spline_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_spl+1]+
(a*a*a-a)*pmw->ddintxi_spline_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_spline_size+index_spl]
)*h*h/6.;
pmw->intxi_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t] = intxi_local_real;
pmw->intxi_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t] = intxi_local_imag;
if(pma->uses_intxi_asymptotic && !(pmw->is_integrated_radtp1 || pmw->is_integrated_radtp2)){
double temp;
//TODO :: can be optimized (no coeff dependence)
class_call(matter_asymptote(pma, t, pmw->index_wd1, pmw->index_wd2, &temp),pma->error_message,pma->error_message);
pmw->intxi_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t] *= temp;
}
//Ifend asymptotic
}
//End t
}
//End coeff
}
//Ifend interpolation
return _SUCCESS_;
}
/**
* Integrate each bin combination of Cl's
*
 * @param pma      Input: pointer to matter struct
 * @param pmw      Input: pointer to matter workspace
* @return the error status
*/
int matter_integrate_each(struct matters* pma,
struct matters_workspace * pmw
){
/**
 * Define and allocate local variables
 * */
int index_coeff;
int index_radtp1,index_radtp2;
int index_radtp_of_bitp1,index_radtp_of_bitp2;
int index_bitp1,index_bitp2;
int index_tilt1,index_tilt2,index_tilt1_tilt2;
double sum_temp = 0.0;
int index_l;
double t_min,t_max,y_min,y_max;
int index_t;
double t;
double intxi_local_real,intxi_local_imag;
int integrated_t_offset;
/* NOTE(review): integrate_logarithmically and y_min/y_max are presumably
 * consumed by the matter_get_t_orig / matter_get_t_limits macros — confirm */
short integrate_logarithmically = (pma->uses_integration == matter_integrate_tw_logt);
int print_total_index = 0;
/* Accumulator for the Cl at each l, summed over all type combinations */
double* sum_l;
class_alloc(sum_l,
pma->l_size*sizeof(double),
pma->error_message);
memset(sum_l,0,pma->l_size*sizeof(double));
/**
 * Get the necessary bessel integrals
 * and store in pre-prepared arrays
 * */
if(pma->has_unintegrated_windows){
int abort = _FALSE_;
#pragma omp parallel private(index_l) firstprivate(pma,pmw)
{
#pragma omp for
for(index_l=0;index_l<pma->l_size;++index_l){
if(!pma->uses_limber_approximation){
class_call_parallel(matter_get_bessel_fort_parallel(pma,
index_l,
pmw
),
pma->error_message,
pma->error_message);
}
//Ifend limber
}
//End l
}
if(abort == _TRUE_) {return _FAILURE_;}
}
/**
 * Now iterate through all bessel integral types
 * and radial types
 * */
short switched_flag = _FALSE_;
short type_doubling = _FALSE_;
for(index_bitp1 = 0; index_bitp1< pma->bitp_size;++index_bitp1){
/* The "normal" bessel integral type carries its own tilt; all others
 * use the reduced tilt */
if(pma->has_bitp_normal && index_bitp1 == pma->bitp_index_normal){
index_tilt1 = pma->tilt_index_normal;
}
else{
index_tilt1 = pma->tilt_index_reduced;
}
for(index_bitp2 = 0; index_bitp2< pma->bitp_size;++index_bitp2){
if(pma->has_bitp_normal && index_bitp2 == pma->bitp_index_normal){
index_tilt2 = pma->tilt_index_normal;
}
else{
index_tilt2 = pma->tilt_index_reduced;
}
index_tilt1_tilt2 = index_symmetric_matrix(index_tilt1,index_tilt2,pma->tilt_size);
pmw->index_tilt1_tilt2 = index_tilt1_tilt2;
for(index_radtp_of_bitp1 =0; index_radtp_of_bitp1 < pma->radtp_of_bitp_size[pmw->index_cltp1*pma->bitp_size+index_bitp1];++index_radtp_of_bitp1){
for(index_radtp_of_bitp2 =0; index_radtp_of_bitp2 < pma->radtp_of_bitp_size[pmw->index_cltp2*pma->bitp_size+index_bitp2]; ++index_radtp_of_bitp2){
index_radtp1 = pma->radtps_of_bitp[pmw->index_cltp1*pma->bitp_size+index_bitp1][index_radtp_of_bitp1];
index_radtp2 = pma->radtps_of_bitp[pmw->index_cltp2*pma->bitp_size+index_bitp2][index_radtp_of_bitp2];
pmw->index_radtp1 = index_radtp1;
pmw->index_radtp2 = index_radtp2;
/* Do test of skipping over a type due to symmetry in all other indices */
/* For equal windows and Cl types the (1,2)/(2,1) pairs are symmetric:
 * skip one ordering and double the other */
if((pmw->index_wd1==pmw->index_wd2) && (pmw->index_cltp1 == pmw->index_cltp2)){
if(index_radtp2 > index_radtp1){type_doubling = _FALSE_;continue;}
else if(index_radtp2 < index_radtp1){type_doubling = _TRUE_;}
else{type_doubling = _FALSE_;}
}else{type_doubling = _FALSE_;}
/* Check for swapping around types to reduce computational effort of cross terms */
if(matter_is_integrated(index_radtp1) && !(matter_is_integrated(index_radtp2))){
class_call(matter_swap_workspace(pmw),
pma->error_message,
pma->error_message);
switched_flag = _TRUE_;
}
index_radtp1 = pmw->index_radtp1;
index_radtp2 = pmw->index_radtp2;
pmw->is_integrated_radtp1 = matter_is_integrated(index_radtp1);
pmw->is_integrated_radtp2 = matter_is_integrated(index_radtp2);
/* Density-split Limber terms are handled elsewhere: skip */
if(pma->uses_density_splitting && pma->has_stp_delta_m && pma->uses_limber_approximation && (index_radtp1 == pma->radtp_dens1 || index_radtp2 == pma->radtp_dens1)){
continue;
}
print_total_index++;
if(pma->matter_verbose > MATTER_VERBOSITY_CLCALCULATION && !MATTER_REWRITE_PRINTING){
printf(" -> BI types [%1d,%1d] (sizes [%2d,%2d]), RAD types [%2d,%2d] (Total %3d/%3d)",index_bitp1,index_bitp2,pma->radtp_of_bitp_size[index_bitp1],pma->radtp_of_bitp_size[index_bitp2],index_radtp1,index_radtp2,print_total_index,pma->radtp_size_total*pma->radtp_size_total);
}
if(pma->matter_verbose > MATTER_VERBOSITY_CLCALCULATION && MATTER_REWRITE_PRINTING){
printf("\r -> BI types [%1d,%1d] (sizes [%2d,%2d]), RAD types [%2d,%2d] (Total %3d/%3d)",index_bitp1,index_bitp2,pma->radtp_of_bitp_size[index_bitp1],pma->radtp_of_bitp_size[index_bitp2],index_radtp1,index_radtp2,print_total_index,pma->radtp_size_total*pma->radtp_size_total);
fflush(stdout);
}
/**
 * First define correct t sampling
 * */
matter_get_t_limits(pmw->index_wd1,pmw->index_wd2)
pmw->tau_weights = pma->tw_weights;
pmw->tau_size = pma->tw_size;
pmw->tau_sampling = pma->tw_sampling;
/* Integrated windows span the full t in (0,1), kept away from the
 * endpoints by bi_maximal_t_offset, and use the integrated tau grid */
if(pmw->is_integrated_radtp1){
t_min = 0.0+pma->bi_maximal_t_offset;//186+pma->bi_maximal_t_offset;
t_max = 1.0-pma->bi_maximal_t_offset;//186-pma->bi_maximal_t_offset found important;
pmw->tau_weights = pma->integrated_tw_weights;
pmw->tau_size = pma->integrated_tw_size;
pmw->tau_sampling = pma->integrated_tw_sampling;
}
if(pmw->is_integrated_radtp2){
t_min = 0.0+pma->bi_maximal_t_offset;//186+pma->bi_maximal_t_offset;
t_max = 1.0-pma->bi_maximal_t_offset;//186-pma->bi_maximal_t_offset found important;
}
if(pma->matter_verbose > MATTER_VERBOSITY_CLCALCULATION && pma->matter_verbose > MATTER_VERBOSITY_RANGES && !MATTER_REWRITE_PRINTING){
printf(" -> t range from %.10e to %.10e \n",t_min,t_max);
}
//TODO :: reformulate
class_test(t_min >= t_max,
pma->error_message,
"Adjust matter_t_offset \n");
y_min = -log(1-t_min);
y_max = -log(1-t_max);
/* Bessel arrays store the integrated-window values behind the
 * non-integrated ones, offset by t_size */
integrated_t_offset = pma->t_size*((pmw->is_integrated_radtp1 || pmw->is_integrated_radtp2)?1:0);//pma->t_size*(is_integrated1*2+is_integrated2);
pmw->t_min = t_min;
pmw->t_max = t_max;
/* Compute the cosmological function f_n^{ij}(t) for this type pair */
class_call(matter_integrate_cosmo(pma,
pmw),
pma->error_message,
pma->error_message);
/**
 * We have now obtained the f_n^{ij}(t)
 *
 * Now we can advance to integrating the final Cl's
 * */
for(index_l=0;index_l<pma->l_size;++index_l){
double sum_t = 0.0;
for(index_t=0;index_t<pma->t_size;++index_t){
matter_get_t_orig(index_t)
sum_temp = 0.0;
double bes_local_real,bes_local_imag;
/**
 * The reverse FFT has
 *
 * index 0 with a factor of 1
 * index N with a factor of 1
 * index i with a factor of 2,
 * since here there would theoretically be
 * both index i and N-i,
 * but we relate N-i to i by symmetry
 * of having a real integrand
 *
 * */
intxi_local_real = pmw->intxi_real[index_radtp1*pma->radtp_size_total+index_radtp2][0*pma->t_size+index_t];
intxi_local_imag = pmw->intxi_imag[index_radtp1*pma->radtp_size_total+index_radtp2][0*pma->t_size+index_t];
bes_local_real = pmw->window_bessel_real[index_l][index_tilt1_tilt2*pma->size_fft_result+0][index_t+integrated_t_offset];
bes_local_imag = pmw->window_bessel_imag[index_l][index_tilt1_tilt2*pma->size_fft_result+0][index_t+integrated_t_offset];
sum_temp +=intxi_local_real*bes_local_real-intxi_local_imag*bes_local_imag;
if(pma->size_fft_cutoff==pma->size_fft_result){
intxi_local_real = pmw->intxi_real[index_radtp1*pma->radtp_size_total+index_radtp2][(pma->size_fft_result-1)*pma->t_size+index_t];
intxi_local_imag = pmw->intxi_imag[index_radtp1*pma->radtp_size_total+index_radtp2][(pma->size_fft_result-1)*pma->t_size+index_t];
bes_local_real = pmw->window_bessel_real[index_l][index_tilt1_tilt2*pma->size_fft_result+(pma->size_fft_result-1)][index_t+integrated_t_offset];
bes_local_imag = pmw->window_bessel_imag[index_l][index_tilt1_tilt2*pma->size_fft_result+(pma->size_fft_result-1)][index_t+integrated_t_offset];
sum_temp +=intxi_local_real*bes_local_real-intxi_local_imag*bes_local_imag;
}
for(index_coeff=1;index_coeff<pma->size_fft_cutoff-1;++index_coeff){
intxi_local_real = pmw->intxi_real[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t];
intxi_local_imag = pmw->intxi_imag[index_radtp1*pma->radtp_size_total+index_radtp2][index_coeff*pma->t_size+index_t];
bes_local_real = pmw->window_bessel_real[index_l][index_tilt1_tilt2*pma->size_fft_result+index_coeff][index_t+integrated_t_offset];
bes_local_imag = pmw->window_bessel_imag[index_l][index_tilt1_tilt2*pma->size_fft_result+index_coeff][index_t+integrated_t_offset];
sum_temp +=2.0*(intxi_local_real*bes_local_real-intxi_local_imag*bes_local_imag);
}
/* Logarithmic t integration carries the Jacobian dt = (1-t) dy */
if((pma->uses_integration == matter_integrate_tw_logt) && !pma->uses_limber_approximation){
sum_t+=(1-t)*pma->t_weights[index_t]*sum_temp;
}
else{
sum_t+=pma->t_weights[index_t]*sum_temp;
}
}
/**
 * Since we only integrate over t_min to t_max,
 * we have to rescale the final value obtained
 * (The t_weights are always scaled as 0 to 1)
 * */
if(!(pma->uses_integration == matter_integrate_tw_logt) && !pma->uses_limber_approximation){
sum_t*=(t_max-t_min);
}
else if(!pma->uses_limber_approximation){
sum_t*=(y_max-y_min);
}
/* Lensing-factor types carry sqrt((l-1)l(l+1)(l+2)) per factor */
if(pma->has_bitp_lfactor && index_bitp2 == pma->bitp_index_lfactor){
sum_t*=sqrt((pma->l_sampling[index_l]-1.0)*pma->l_sampling[index_l]*(pma->l_sampling[index_l]+1.0)*(pma->l_sampling[index_l]+2.0));
}
if(pma->has_bitp_lfactor && index_bitp1 == pma->bitp_index_lfactor){
sum_t*=sqrt((pma->l_sampling[index_l]-1.0)*pma->l_sampling[index_l]*(pma->l_sampling[index_l]+1.0)*(pma->l_sampling[index_l]+2.0));
}
if(type_doubling == _TRUE_){sum_t*=2.;}
sum_l[index_l] += sum_t;
if(pma->matter_verbose > MATTER_VERBOSITY_CLCALCULATION_PARTIAL && !MATTER_REWRITE_PRINTING){
printf("(l:%i (bitp1 =%i ,bitp2 =%i, radtp1 =%i , radtp2 = %i) = %.10e (total = %.10e) \n",(int)pma->l_sampling[index_l],index_bitp1,index_bitp2,index_radtp1,index_radtp2,sum_t,sum_l[index_l]);
}
}
//End l
/* Undo any workspace swap before moving to the next type pair */
if(switched_flag==_TRUE_){
class_call(matter_swap_workspace(pmw),
pma->error_message,
pma->error_message);
switched_flag = _FALSE_;
}
index_radtp1 = pmw->index_radtp1;
index_radtp2 = pmw->index_radtp2;
}
//End radtp2
}
//End radtp1
}
//End bitp2
}
//End bitp1
/**
 * Print final results
 * */
if(MATTER_REWRITE_PRINTING && pma->matter_verbose > MATTER_VERBOSITY_CLCALCULATION){
printf("\r -> Output for current Window Combination : \n");
}
for(index_l=0;index_l<pma->l_size;++index_l){
if(pma->matter_verbose > MATTER_VERBOSITY_CLCALCULATION){
printf("(l:%i) = %.10e \n",(int)pma->l_sampling[index_l],sum_l[index_l]);
}
pma->cl[pmw->index_cltp1_cltp2][pmw->window_counter*pma->l_size+index_l] = sum_l[index_l];
}
free(sum_l);
return _SUCCESS_;
}
/**
* Small helper function to get bessel integrals for every value of t
* in the limber approximation
*
* @param pma Input: pointer to matter struct
* @param index_l Input: l index
* @param pmw Input: pointer to matter workspace
* @return the error status
*/
int matter_get_bessel_limber(
struct matters* pma,
int index_l,
struct matters_workspace * pmw
){
double** window_bessel_real = pmw->window_bessel_real[index_l];
double** window_bessel_imag = pmw->window_bessel_imag[index_l];
int index_tilt1_tilt2;
int index_coeff;
/* Limber evaluation point l0 = sqrt(l(l+1)) */
double l0 = sqrt(pma->l_sampling[index_l]*(pma->l_sampling[index_l]+1.));
/* log(l0) is used by both the amplitude and the phase: compute it once */
double log_l0 = log(l0);
for(index_tilt1_tilt2=0;index_tilt1_tilt2<pma->tilt_grid_size;++index_tilt1_tilt2){
/* The amplitude l0^(nu_real-3) depends only on the tilt, not on the FFT
 * coefficient: hoist it out of the coefficient loop (it was previously
 * recomputed, including log(l0), for every coefficient). */
double exp_factor = exp(log_l0*(pma->nu_real[index_tilt1_tilt2]-3.));
for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
double phase = log_l0*pma->nu_imag[index_coeff];
/**
 * Since at t=1, we cannot split integration into 1/t and t,
 * only half of the single point t=1 contributes
 * The theoretical factor would be (2pi^2),
 * but we get aformentioned additional factor of 1/2.
 * */
window_bessel_real[index_tilt1_tilt2*pma->size_fft_result+index_coeff][0] = _PI_*_PI_*exp_factor*cos(phase);
window_bessel_imag[index_tilt1_tilt2*pma->size_fft_result+index_coeff][0] = _PI_*_PI_*exp_factor*sin(phase);
/* Integrated windows store the same value at offset t_size */
if(pma->has_integrated_windows){
window_bessel_real[index_tilt1_tilt2*pma->size_fft_result+index_coeff][0+pma->t_size] = _PI_*_PI_*exp_factor*cos(phase);
window_bessel_imag[index_tilt1_tilt2*pma->size_fft_result+index_coeff][0+pma->t_size] = _PI_*_PI_*exp_factor*sin(phase);
}
}
//End coeff
}
//End tilt grid
return _SUCCESS_;
}
/**
* Small helper function to get the type of derivative acting
* on the window function depending on the radial type
*
* @param pma Input: pointer to matter struct
* @param derivative_type1 Output: pointer to first derivative type
* @param derivative_type2 Output: pointer to second derivative type
* @param index_radtp1 Input: radial type of first source
* @param index_radtp2 Input: radial type of second source
* @return the error status
*/
int matter_get_derivative_type(
struct matters* pma,
int* derivative_type1,
int* derivative_type2,
int index_radtp1,
int index_radtp2
){
/* Derivative codes written here (as read by the window evaluation):
 *   4  : density-splitting term, 1/2 : Doppler/RSD derivatives,
 *  -1  : integrated (lensing/gravitational) kernels, 0 : untouched. */
if(!pma->uses_relative_factors){
if(pma->has_stp_delta_m && pma->uses_density_splitting){
if(index_radtp1 == pma->radtp_dens1){
*derivative_type1 = 4;
}
if(index_radtp2 == pma->radtp_dens1){
*derivative_type2 = 4;
}
}
if(pma->has_redshift_space_distortion && (!pma->uses_rsd_combination)){
/* radtp_dop1 and radtp_rsd are distinct radial-type indices, so the two
 * tests are mutually exclusive; use if/else-if for window 1 as was
 * already done for window 2 (previously two independent ifs). */
if(index_radtp1 == pma->radtp_dop1){
*derivative_type1 = 1;
}
else if(index_radtp1 == pma->radtp_rsd){
*derivative_type1 = 2;
}
if(index_radtp2 == pma->radtp_dop1){
*derivative_type2 = 1;
}
else if(index_radtp2 == pma->radtp_rsd){
*derivative_type2 = 2;
}
}
}
/* Number-count lensing uses the integrated kernel */
if(pma->has_lensing_terms){
if(index_radtp1 == pma->radtp_nclens){
*derivative_type1 = -1;
}
if(index_radtp2 == pma->radtp_nclens){
*derivative_type2 = -1;
}
}
/* Shear also uses the integrated kernel */
if(pma->has_cl_shear){
if(index_radtp1 == pma->radtp_shlens){
*derivative_type1 = -1;
}
if(index_radtp2 == pma->radtp_shlens){
*derivative_type2 = -1;
}
}
/* Gravitational terms g4/g5 use the integrated kernel */
if(pma->has_gravitational_terms){
if(index_radtp1 == pma->radtp_g4 || index_radtp1 == pma->radtp_g5){
*derivative_type1 = -1;
}
if(index_radtp2 == pma->radtp_g4 || index_radtp2 == pma->radtp_g5){
*derivative_type2 = -1;
}
}
return _SUCCESS_;
}
/**
* Small helper function to get bessel integrals for every value of t
 * in a parallelized code for non-integrated contributions
*
 * @param pma      Input: pointer to matter struct
* @param index_l Input: l index
* @param pmw Input: pointer to matter workspace
* @return the error status
*/
int matter_get_bessel_fort_parallel(
struct matters* pma,
int index_l,
struct matters_workspace* pmw
){
int index_l_eval = (pma->uses_bessel_storeall?pma->l_sampling[index_l]:index_l);
int index_wd1 = pmw->index_wd1;
int index_wd2 = pmw->index_wd2;
double** window_bessel_real = pmw->window_bessel_real[index_l];
double** window_bessel_imag = pmw->window_bessel_imag[index_l];
short integrate_logarithmically = (pma->uses_integration == matter_integrate_tw_logt);
int index_coeff;
double res_real,res_imag;
int last_t = 0;
double t_min,t_max;
int index_tilt1,index_tilt2,index_tilt1_tilt2;
matter_get_t_limits(index_wd1,index_wd2)
double y_min,y_max;
y_min = -log(1-t_min);
y_max = -log(1-t_max);
int index_t;
double t,eval_pt=0.0;
double a,b,h;
double* bi_real_i;
double* bi_imag_i;
double* ddbi_real_i;
double* ddbi_imag_i;
if(pma->has_unintegrated_windows){
for(index_tilt1=0;index_tilt1<pma->tilt_size;++index_tilt1){
for(index_tilt2=index_tilt1;index_tilt2<pma->tilt_size;++index_tilt2){
index_tilt1_tilt2 = index_symmetric_matrix(index_tilt1,index_tilt2,pma->tilt_size);
for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
last_t = pma->bi_size[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff]-2;
bi_real_i = pma->bi_real[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff];
bi_imag_i = pma->bi_imag[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff];
ddbi_real_i = pma->ddbi_real[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff];
ddbi_imag_i = pma->ddbi_imag[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff];
// HERE :: last_t t change
last_t = 0;
for(index_t=0;index_t<pma->t_size;++index_t){
matter_get_t_orig(index_t);
eval_pt = 1.0-t;
if(eval_pt>pma->bi_max[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff]){
window_bessel_real[index_tilt1_tilt2*pma->size_fft_result+index_coeff][index_t] = 0.0;
window_bessel_imag[index_tilt1_tilt2*pma->size_fft_result+index_coeff][index_t] = 0.0;
continue;
}
class_call(matter_spline_hunt(pma->bi_sampling,
pma->bi_size[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff],
eval_pt,
&last_t,
&h,
&a,
&b,
pma->error_message),
pma->error_message,
pma->error_message);
res_real =a * bi_real_i[last_t] + b * bi_real_i[last_t+1] + ((a*a*a-a)* ddbi_real_i[last_t] + (b*b*b-b)* ddbi_real_i[last_t+1])*h*h/6.;
res_imag =a * bi_imag_i[last_t] + b * bi_imag_i[last_t+1] + ((a*a*a-a)* ddbi_imag_i[last_t] + (b*b*b-b)* ddbi_imag_i[last_t+1])*h*h/6.;
window_bessel_real[index_tilt1_tilt2*pma->size_fft_result+index_coeff][index_t] = res_real;
window_bessel_imag[index_tilt1_tilt2*pma->size_fft_result+index_coeff][index_t] = res_imag;
}
//End t
}
//End coeff
}
//End tilt2
}
//End tilt1
}
//End iff
return _SUCCESS_;
}
/**
 * Small helper function to get bessel integrals for every value of t
 * in a parallelized code for integrated contributions
 *
 * Results are written into the second half (offset pma->t_size) of the
 * window_bessel arrays, after the non-integrated entries.
 *
 * @param pma Input: pointer to matter struct
 * @param index_l Input: l index
 * @param pmw Input: pointer to matter workspace
 * @return the error status
 */
int matter_get_bessel_fort_parallel_integrated(
  struct matters* pma,
  int index_l,
  struct matters_workspace* pmw
){
  double** window_bessel_real = pmw->window_bessel_real[index_l];
  double** window_bessel_imag = pmw->window_bessel_imag[index_l];
  /* With bessel_storeall the tables are indexed by the physical l value,
     otherwise by the l index itself */
  int index_l_eval = (pma->uses_bessel_storeall?pma->l_sampling[index_l]:index_l);
  /* NOTE(review): integrate_logarithmically, t_min/t_max, y_min/y_max and t
     look unused here, but the matter_get_t_orig macro below expands to code
     that reads/writes these exact local names -- do not remove or rename. */
  short integrate_logarithmically = (pma->uses_integration == matter_integrate_tw_logt);
  int index_coeff;
  double res_real,res_imag;
  int last_t = 0;
  double t_min = 0.0,t_max = 1.0;
  int index_tilt1,index_tilt2,index_tilt1_tilt2;
  double a,b,h;
  /* Spline tables (values and second derivatives) of the current
     (tilt pair, l, coefficient) */
  double* bi_real_i;
  double* bi_imag_i;
  double* ddbi_real_i;
  double* ddbi_imag_i;
  if(pma->has_integrated_windows){
    class_test(!pma->has_tilt_reduced,
               pma->error_message,
               "Has integrated windows, but not reduced tilt. This is a bug.");
    for(index_tilt1=0;index_tilt1<pma->tilt_size;++index_tilt1){
      for(index_tilt2=index_tilt1;index_tilt2<pma->tilt_size;++index_tilt2){
        index_tilt1_tilt2 = index_symmetric_matrix(index_tilt1,index_tilt2,pma->tilt_size);
        /* Keep t strictly inside (0,1): the endpoints are excluded by a
           small offset (found to be important numerically) */
        t_max = 1.0-pma->bi_maximal_t_offset;//186-pma->bi_maximal_t_offset found important;
        t_min = 0.0+pma->bi_maximal_t_offset;//186+pma->bi_maximal_t_offset;
        double y_min,y_max;
        y_min = -log(1-t_min);
        y_max = -log(1-t_max);//+pma->bi_maximal_t_offset);
        int index_t;
        double t,eval_pt=0.0;
        for(index_coeff=0;index_coeff<pma->size_fft_cutoff;++index_coeff){
          last_t = pma->bi_size[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff]-2;
          /* Cache the spline arrays for this (tilt pair, l, coefficient) */
          bi_real_i = pma->bi_real[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff];
          bi_imag_i = pma->bi_imag[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff];
          ddbi_real_i = pma->ddbi_real[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff];
          ddbi_imag_i = pma->ddbi_imag[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff];
          // HERE :: last_t t change
          /* Restart the spline hunt from the left edge for every new spline */
          last_t = 0;
          for(index_t=0;index_t<pma->t_size;++index_t){
            /* Macro: sets t for this index_t (uses y_min/y_max when
               integrating logarithmically) */
            matter_get_t_orig(index_t);
            eval_pt = 1.0-t;
            /* Beyond the stored maximum the bessel integral is treated as
               negligible: store exact zeros (note the +t_size offset for
               integrated contributions) and skip the interpolation */
            if(eval_pt>pma->bi_max[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff]){
              window_bessel_real[index_tilt1_tilt2*pma->size_fft_result+index_coeff][index_t+pma->t_size] = 0.0;
              window_bessel_imag[index_tilt1_tilt2*pma->size_fft_result+index_coeff][index_t+pma->t_size] = 0.0;
              continue;
            }
            /* Locate the bracketing interval: weights a,b and width h */
            class_call(matter_spline_hunt(pma->bi_sampling,
                          pma->bi_size[index_tilt1_tilt2][index_l_eval*pma->size_fft_result+index_coeff],
                          eval_pt,
                          &last_t,
                          &h,
                          &a,
                          &b,
                          pma->error_message),
                       pma->error_message,
                       pma->error_message);
            /* Natural cubic-spline evaluation at eval_pt */
            res_real =a * bi_real_i[last_t] + b * bi_real_i[last_t+1] + ((a*a*a-a)* ddbi_real_i[last_t] + (b*b*b-b)* ddbi_real_i[last_t+1])*h*h/6.;
            res_imag =a * bi_imag_i[last_t] + b * bi_imag_i[last_t+1] + ((a*a*a-a)* ddbi_imag_i[last_t] + (b*b*b-b)* ddbi_imag_i[last_t+1])*h*h/6.;
            window_bessel_real[index_tilt1_tilt2*pma->size_fft_result+index_coeff][index_t+pma->t_size] = res_real;
            window_bessel_imag[index_tilt1_tilt2*pma->size_fft_result+index_coeff][index_t+pma->t_size] = res_imag;
          }
          //End t
        }
        //End coeff
      }
      //End tilt2
    }
    //End tilt1
  }
  //End iff
  return _SUCCESS_;
}
/**
 * Small helper function for preparing an interpolation hunt: locate,
 * via plain bisection, the index of the interval of x_array bracketing x.
 * Works for both ascending and descending arrays; an out-of-range x is
 * clamped to the nearest endpoint index.
 *
 * @param x_array Input: pointer to x array
 * @param x_size Input: size of x array
 * @param x Input: x position to search at
 * @param last Input/Output: last found position
 * @param errmsg Input/Output: Error messages
 * @return the error status
 */
int matter_spline_prepare_hunt(
  double* x_array,
  int x_size,
  double x,
  int* last,
  ErrorMsg errmsg){
  int lo = 0;
  int hi = x_size-1;
  if (x_array[lo] < x_array[hi]){
    /* ascending array */
    if (x < x_array[lo]) { *last = lo; return _SUCCESS_; }
    if (x > x_array[hi]) { *last = hi; return _SUCCESS_; }
    while (hi-lo > 1) {
      int mid = (int)(0.5*(lo+hi));
      if (x < x_array[mid]) { hi = mid; }
      else                  { lo = mid; }
    }
  }
  else {
    /* descending array */
    if (x < x_array[hi]) { *last = hi; return _SUCCESS_; }
    if (x > x_array[lo]) { *last = lo; return _SUCCESS_; }
    while (hi-lo > 1) {
      int mid = (int)(0.5*(lo+hi));
      if (x > x_array[mid]) { hi = mid; }
      else                  { lo = mid; }
    }
  }
  /* lo is the left edge of the bracketing interval */
  *last = lo;
  return _SUCCESS_;
}
/**
 * Small helper function for doing the interpolation hunt without
 * returning the final array
 *
 * Starts searching at last found position and gradually increases
 * stepsize of search
 *
 * Returns the found interval of the x as the index last in the array,
 * the width of the interval h, and the parameters a,b quantifying the
 * relative distance x along the interval
 *
 * a=1 if x is on the left border, and a=0 if on the right border
 * b=1-a
 *
 * (Assumes x_array is sorted in ascending order; x outside the array
 * range is an error.)
 *
 * @param x_array Input: pointer to x array
 * @param x_size Input: size of x array
 * @param x Input: x position to search at
 * @param last Input/Output: last found position
 * @param h Output: Width of interval of found x
 * @param a Output: Relative distance along interval
 * @param b Output: 1-a
 * @param errmsg Input/Output: Error messages
 * @return the error status
 */
int matter_spline_hunt(
  double* x_array,
  int x_size,
  double x,
  int* last,
  double* h,
  double* a,
  double* b,
  ErrorMsg errmsg
  ){
  int last_index = *last;
  //Old :: last_index = x_size-1 (and if >=x_size) , but here inf = last_index,sup=last_index+1 => x_size => eval of array AT x_size, which is disallowed
  /* Clamp the starting guess into [0, x_size-2] so that both last_index
     and last_index+1 are valid array indices */
  if(last_index>=x_size-1){last_index=x_size-2;}
  if(last_index<0){last_index=0;}
  int inf,sup,mid,inc;
  inc=1;
  if (x >= x_array[last_index]) {
    if (x > x_array[x_size-1]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
              x,x_array[x_size-1]);
      return _FAILURE_;
    }
    /* try closest neighbor upward */
    inf = last_index;
    sup = inf + inc;
    if (x > x_array[sup]) {
      /* hunt upward with linearly growing step size */
      while (x > x_array[sup]) {
        inf = sup;
        inc += 1;
        sup += inc;
        if (sup > x_size-1) {
          sup = x_size-1;
        }
      }
      /* bisect inside the over-shot range */
      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x < x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }
  }
  else {
    if (x < x_array[0]) {
      sprintf(errmsg,"%s(L:%d) : x=%.20e < x_min=%.20e",__func__,__LINE__,
              x,x_array[0]);
      return _FAILURE_;
    }
    /* try closest neighbor downward */
    sup = last_index;
    inf = sup - inc;
    if (x < x_array[inf]) {
      /* hunt downward with linearly growing step size */
      while (x < x_array[inf]) {
        sup = inf;
        inc += 1;
        inf -= inc;
        if (inf < 0) {
          inf = 0;
        }
      }
      /* bisect inside the over-shot range */
      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x < x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }
  }
  /* x is now bracketed by [inf, sup] with sup == inf+1 */
  last_index = inf;
  *last = last_index;
  *h = x_array[sup] - x_array[inf];
  *b = (x-x_array[inf])/(*h);
  *a = 1.0-(*b);
  return _SUCCESS_;
}
/**
 * Small helper function for swapping around the indices in the matter_workspace
 *
 * Swaps the window, radial-type, initial-condition and cl-type indices
 * between source 1 and source 2.
 *
 * @param pmw Input: pointer to matter workspace structure
 * @return the error status
 */
int matter_swap_workspace(struct matters_workspace* pmw){
  int temp;
  temp = pmw->index_wd2;
  pmw->index_wd2 = pmw->index_wd1;
  pmw->index_wd1 = temp;
  temp = pmw->index_radtp1;
  pmw->index_radtp1 = pmw->index_radtp2;
  pmw->index_radtp2 = temp;
  /* Bug fix: the initial-condition indices were previously copied, not
     swapped (ic2 = ic1; ic1 = temp, with temp already equal to ic1). */
  temp = pmw->index_ic1;
  pmw->index_ic1 = pmw->index_ic2;
  pmw->index_ic2 = temp;
  temp = pmw->index_cltp1;
  pmw->index_cltp1 = pmw->index_cltp2;
  pmw->index_cltp2 = temp;
  return _SUCCESS_;
}
/**
 * Write the bessel integral file, including the header,
 * and the actual data
 *
 * On-disk layout (must stay in sync with matter_read_bessel_integrals
 * and matter_read_bessel_file_correct):
 *   header: tilt_grid_size, size_fft_result, l_size_recursion,
 *           bessel_recursion_t_size (4 ints)
 *   sampling: bessel_recursion_t_size+2 doubles
 *   per (tilt pair, l*fft+coeff): bi_size (int), bi_max (double),
 *           then bi_real/bi_imag/ddbi_real/ddbi_imag (bi_size doubles each)
 *
 * NOTE(review): fwrite return values are not checked here, so a full
 * disk produces a silently truncated file; the reader's f_read checks
 * will catch it on the next run.
 *
 * @param pma Input: pointer to matter structure
 * @return the error status
 */
int matter_write_bessel_integrals(struct matters* pma){
  if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS){
    printf("Method :: Writing bessel file \n");
  }
  /**
   * Define initial variables
   * */
  FILE* write_file;
  int index_tilt1_tilt2;
  int index_fft_l;
  /**
   * Open file to write
   * */
  write_file = fopen(pma->bessel_file_name,"wb");
  class_test(!write_file,
             pma->error_message,
             "Could not create file %s \n",pma->bessel_file_name);
  /**
   * Write header
   * */
  fwrite(&(pma->tilt_grid_size),sizeof(int),1,write_file);
  fwrite(&(pma->size_fft_result),sizeof(int),1,write_file);
  fwrite(&(pma->l_size_recursion),sizeof(int),1,write_file);
  fwrite(&(pma->bessel_recursion_t_size),sizeof(int),1,write_file);
  /**
   * Write sampling
   * */
  fwrite(pma->bi_sampling,sizeof(double),(pma->bessel_recursion_t_size+2),write_file);
  /**
   * Write actual data: per spline, its size and maximum first, then the
   * values and second derivatives (bi_size elements each)
   * */
  for(index_tilt1_tilt2=0;index_tilt1_tilt2<pma->tilt_grid_size;++index_tilt1_tilt2){
    for(index_fft_l=0;index_fft_l<pma->size_fft_result*pma->l_size_recursion;++index_fft_l){
      fwrite(&(pma->bi_size[index_tilt1_tilt2][index_fft_l]),sizeof(int),1,write_file);
      fwrite(&(pma->bi_max[index_tilt1_tilt2][index_fft_l]),sizeof(double),1,write_file);
      fwrite(pma->bi_real[index_tilt1_tilt2][index_fft_l],sizeof(double),pma->bi_size[index_tilt1_tilt2][index_fft_l],write_file);
      fwrite(pma->bi_imag[index_tilt1_tilt2][index_fft_l],sizeof(double),pma->bi_size[index_tilt1_tilt2][index_fft_l],write_file);
      fwrite(pma->ddbi_real[index_tilt1_tilt2][index_fft_l],sizeof(double),pma->bi_size[index_tilt1_tilt2][index_fft_l],write_file);
      fwrite(pma->ddbi_imag[index_tilt1_tilt2][index_fft_l],sizeof(double),pma->bi_size[index_tilt1_tilt2][index_fft_l],write_file);
    }
  }
  /**
   * Close the file
   * */
  fclose(write_file);
  return _SUCCESS_;
}
/**
 * Read the contents of the bessel file. Gives error if header
 * or contents are invalid.
 *
 * Mirrors the on-disk layout produced by matter_write_bessel_integrals:
 * header (4 ints), sampling (bessel_recursion_t_size+2 doubles), then
 * per spline its size, maximum, values and second derivatives.
 *
 * @param pma Input: pointer to matter structure
 * @return the error status
 */
int matter_read_bessel_integrals(struct matters* pma){
  if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS){
    printf("Method :: Reading bessel file content \n");
  }
  /**
   * Define initial variables
   * */
  int index_tilt1,index_tilt2,index_tilt1_tilt2;
  int index_l,index_coeff;
  int tilt_grid_size_temp;
  int fft_size_temp;
  int l_size_temp;
  int bessel_recursion_t_size_temp;
  int f_read;
#ifdef _OPENMP
  double read_start_omp = omp_get_wtime();
#else
  double read_start_omp = 0.0;
#endif
  FILE* read_file;
  /**
   * Open file for reading and check for errors during opening
   * */
  read_file = fopen(pma->bessel_file_name,"rb");
  class_test(!read_file,
             pma->error_message,
             "file '%s' missing/unopenable even though initial check indicated existence.",pma->bessel_file_name);
  /**
   * Read header
   * */
  f_read = 0;
  f_read+=fread(&tilt_grid_size_temp,sizeof(int),1,read_file);
  f_read+=fread(&fft_size_temp,sizeof(int),1,read_file);
  f_read+=fread(&l_size_temp,sizeof(int),1,read_file);
  f_read+=fread(&bessel_recursion_t_size_temp,sizeof(int),1,read_file);
  class_test(f_read!=4,
             pma->error_message,
             "file '%s' is corrupted even though initial check indicated none.",pma->bessel_file_name);
  /**
   * Allocate arrays to store content of file in
   * (sized from pma's own parameters; the header consistency checks
   * below confirm the file matches these sizes)
   * */
  class_alloc(pma->bi_real,
              pma->tilt_grid_size*sizeof(double**),
              pma->error_message);
  class_alloc(pma->bi_imag,
              pma->tilt_grid_size*sizeof(double**),
              pma->error_message);
  class_alloc(pma->bi_size,
              pma->tilt_grid_size*sizeof(int*),
              pma->error_message);
  class_alloc(pma->bi_max,
              pma->tilt_grid_size*sizeof(double*),
              pma->error_message);
  class_alloc(pma->bi_sampling,
              (pma->bessel_recursion_t_size+2)*sizeof(double),
              pma->error_message);
  class_alloc(pma->ddbi_real,
              pma->tilt_grid_size*sizeof(double**),
              pma->error_message);
  class_alloc(pma->ddbi_imag,
              pma->tilt_grid_size*sizeof(double**),
              pma->error_message);
  /* One (l,coeff)-indexed table per symmetric tilt pair */
  for(index_tilt1=0;index_tilt1<pma->tilt_size;++index_tilt1){
    for(index_tilt2=index_tilt1;index_tilt2<pma->tilt_size;++index_tilt2){
      index_tilt1_tilt2 = index_symmetric_matrix(index_tilt1,index_tilt2,pma->tilt_size);
      class_alloc(pma->bi_real[index_tilt1_tilt2],
                  pma->l_size_recursion*pma->size_fft_result*sizeof(double*),
                  pma->error_message);
      class_alloc(pma->bi_imag[index_tilt1_tilt2],
                  pma->l_size_recursion*pma->size_fft_result*sizeof(double*),
                  pma->error_message);
      class_alloc(pma->bi_size[index_tilt1_tilt2],
                  pma->l_size_recursion*pma->size_fft_result*sizeof(int),
                  pma->error_message);
      class_alloc(pma->bi_max[index_tilt1_tilt2],
                  pma->l_size_recursion*pma->size_fft_result*sizeof(double),
                  pma->error_message);
      class_alloc(pma->ddbi_real[index_tilt1_tilt2],
                  pma->l_size_recursion*pma->size_fft_result*sizeof(double*),
                  pma->error_message);
      class_alloc(pma->ddbi_imag[index_tilt1_tilt2],
                  pma->l_size_recursion*pma->size_fft_result*sizeof(double*),
                  pma->error_message);
    }
  }
  /**
   * Check header correctness
   * */
  class_test(pma->tilt_grid_size!=tilt_grid_size_temp,
             pma->error_message,
             "Invalid file read (tilt_grid)");
  class_test(pma->size_fft_result!=fft_size_temp,
             pma->error_message,
             "Invalid file read (fft)");
  class_test(pma->l_size_recursion!=l_size_temp,
             pma->error_message,
             "Invalid file read (l_size)");
  class_test(pma->bessel_recursion_t_size!=bessel_recursion_t_size_temp,
             pma->error_message,
             "Invalid file read (t_size)");
  /**
   * Read t sampling
   * */
  f_read = 0;
  f_read+=fread(pma->bi_sampling,sizeof(double),(pma->bessel_recursion_t_size+2),read_file);
  class_test(f_read!=(pma->bessel_recursion_t_size+2),
             pma->error_message,
             "Invalid file read (bi_sampling)");
  /**
   * Read all content and for each iteration check if content is read correctly
   * */
  for(index_tilt1_tilt2=0;index_tilt1_tilt2<pma->tilt_grid_size;++index_tilt1_tilt2){
    for(index_l=0;index_l<pma->l_size_recursion;++index_l){
      for(index_coeff=0;index_coeff<pma->size_fft_result;++index_coeff){
        f_read=0;
        /* bi_size must be read first: it determines how many doubles
           follow for this spline */
        f_read+=fread(&(pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff]),sizeof(int),1,read_file);
        f_read+=fread(&(pma->bi_max[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff]),sizeof(double),1,read_file);
        /* bi_real/bi_imag are allocated at full capacity (t_size+2),
           ddbi_* only at the actually stored bi_size */
        class_alloc(pma->bi_real[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                    (pma->bessel_recursion_t_size+2)*sizeof(double),
                    pma->error_message);
        class_alloc(pma->bi_imag[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                    (pma->bessel_recursion_t_size+2)*sizeof(double),
                    pma->error_message);
        class_alloc(pma->ddbi_real[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                    pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff]*sizeof(double),
                    pma->error_message);
        class_alloc(pma->ddbi_imag[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                    pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff]*sizeof(double),
                    pma->error_message);
        f_read+=fread(pma->bi_real[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],sizeof(double),pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],read_file);
        f_read+=fread(pma->bi_imag[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],sizeof(double),pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],read_file);
        f_read+=fread(pma->ddbi_real[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],sizeof(double),pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],read_file);
        f_read+=fread(pma->ddbi_imag[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],sizeof(double),pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],read_file);
        /* 2 header items + 4 arrays of bi_size doubles each */
        class_test(f_read!=2+4*pma->bi_size[index_tilt1_tilt2][index_l*pma->size_fft_result+index_coeff],
                   pma->error_message,
                   "Invalid file read (bi_size,bi_max,bi_real,bi_imag,ddbi_real or ddbi_imag)");
      }
    }
  }
  /**
   * Close the file
   * */
  fclose(read_file);
#ifdef _OPENMP
  double read_end_omp = omp_get_wtime();
#else
  double read_end_omp = 0.0;
#endif
  if(pma->matter_verbose > MATTER_VERBOSITY_TIMING ){
    printf(" -> Reading bessel integrals took %f REAL seconds \n",read_end_omp-read_start_omp);
  }
  return _SUCCESS_;
}
/**
 * Read the usability of the bessel integral binary file.
 * Checks for existence of file. If file exists, and it
 * is readable as a binary file, and the header is correctly
 * readable, and the header agrees with the precision parameters
 * of the current run, the file is usable.
 *
 * Otherwise, it is not
 *
 * Note: this function only ever clears the flag -- the caller is
 * expected to initialize *is_correct_file (presumably to _TRUE_).
 *
 * @param pma Input: pointer to matter structure
 * @param is_correct_file Output: pointer to correctness flag output
 * @return the error status
 */
int matter_read_bessel_file_correct(struct matters* pma,short* is_correct_file){
  if(pma->matter_verbose > MATTER_VERBOSITY_FUNCTIONS){
    printf("Method :: Reading bessel file existence/usability/correctness \n");
  }
  /**
   * Define initial variables
   * */
  FILE* read_file;
  int tilt_grid_size_temp;
  int fft_size_temp;
  int l_size_temp;
  int bessel_recursion_t_size_temp;
  int f_read;
  /**
   * Build the file name and check if file is readable at all (exists)
   * */
  f_read = 0;
#ifdef BESSEL_DIR
  char temp[100];
  /* Use an explicit "%s" so that a '%' inside BESSEL_DIR can never be
     interpreted as a format specifier */
  sprintf(pma->bessel_file_name,"%s",BESSEL_DIR);
  sprintf(temp,"/bessel_%i_%i_%i_%i.bin",pma->tilt_grid_size,pma->size_fft_result,pma->l_size_recursion,pma->bessel_recursion_t_size);
  strcat(pma->bessel_file_name,temp);
#else
  sprintf(pma->bessel_file_name,"output/bessel_%i_%i_%i_%i.bin",pma->tilt_grid_size,pma->size_fft_result,pma->l_size_recursion,pma->bessel_recursion_t_size);
#endif
  read_file = fopen(pma->bessel_file_name,"rb");
  if(!read_file){*is_correct_file=_FALSE_;return _SUCCESS_;}
  /**
   * Check if header is readable
   * */
  f_read+=fread(&tilt_grid_size_temp,sizeof(int),1,read_file);
  f_read+=fread(&fft_size_temp,sizeof(int),1,read_file);
  f_read+=fread(&l_size_temp,sizeof(int),1,read_file);
  f_read+=fread(&bessel_recursion_t_size_temp,sizeof(int),1,read_file);
  if(f_read!=4){
    /* Bug fix: on a short read the *_temp values are indeterminate, so
       do not fall through to the comparisons below (which previously
       read uninitialized memory) -- fail fast instead */
    *is_correct_file=_FALSE_;
    fclose(read_file);
    return _SUCCESS_;
  }
  /**
   * Check if header agrees with desired precision parameters
   * */
  if(pma->tilt_grid_size!=tilt_grid_size_temp){*is_correct_file=_FALSE_;}
  if(pma->size_fft_result!=fft_size_temp){*is_correct_file=_FALSE_;}
  if(pma->l_size_recursion!=l_size_temp){*is_correct_file=_FALSE_;}
  if(pma->bessel_recursion_t_size!=bessel_recursion_t_size_temp){*is_correct_file=_FALSE_;}
  /**
   * Close the file again
   * */
  fclose(read_file);
  return _SUCCESS_;
}
/**
 * Set the indices relevant to handling the different window functions,
 * especially given the number of elements depending on cross-correlations
 * between different cl types, like nCl's and sCl's
 *
 * For every cl-type pair this stores the number of window pairs and,
 * for each first window, the [start,end] range of second windows.
 *
 * @param pma Input: pointer to matter structure
 * @return the error status
 */
int matter_obtain_window_indices(struct matters* pma){
  int index_cltp1,index_cltp2,index_cltp1_cltp2;
  int index_wd1;
  /* Total number of unordered window pairs.
     (The former locals x/x_grid counting "missing" off-diagonal elements
     were dead code after the DESC modification below and were removed.) */
  pma->num_window_grid = (pma->num_windows*(pma->num_windows+1))/2;
  class_alloc(pma->window_size,
              pma->cltp_grid_size*sizeof(int),
              pma->error_message);
  class_alloc(pma->window_index_start,
              pma->cltp_grid_size*sizeof(int*),
              pma->error_message);
  class_alloc(pma->window_index_end,
              pma->cltp_grid_size*sizeof(int*),
              pma->error_message);
  for(index_cltp1=0;index_cltp1<pma->cltp_size;++index_cltp1){
    for(index_cltp2=index_cltp1;index_cltp2<pma->cltp_size;++index_cltp2){
      index_cltp1_cltp2 = index_symmetric_matrix(index_cltp1,index_cltp2,pma->cltp_size);
      class_alloc(pma->window_index_start[index_cltp1_cltp2],
                  pma->num_windows*sizeof(int),
                  pma->error_message);
      class_alloc(pma->window_index_end[index_cltp1_cltp2],
                  pma->num_windows*sizeof(int),
                  pma->error_message);
      if(index_cltp1==index_cltp2){
        /* Auto-correlation: only the upper triangle of window pairs */
        //[NS]: All below is modified for DESC
        pma->window_size[index_cltp1_cltp2]=(pma->num_windows_per_cltp[index_cltp1]*(pma->num_windows_per_cltp[index_cltp1]+1))/2;
        for(index_wd1=0;index_wd1<pma->num_windows;++index_wd1){
          pma->window_index_start[index_cltp1_cltp2][index_wd1]=index_wd1;
          pma->window_index_end[index_cltp1_cltp2][index_wd1]=pma->num_windows_per_cltp[index_cltp1]-1;//MIN(index_wd1+pma->non_diag,pma->num_windows-1);
        }
      }
      else{
        /* Cross-correlation: the full rectangular grid of window pairs */
        pma->window_size[index_cltp1_cltp2]=pma->num_windows_per_cltp[index_cltp1]*pma->num_windows_per_cltp[index_cltp2];
        for(index_wd1=0;index_wd1<pma->num_windows;++index_wd1){
          pma->window_index_start[index_cltp1_cltp2][index_wd1]=0;//MAX(0,index_wd1-pma->non_diag);
          pma->window_index_end[index_cltp1_cltp2][index_wd1]=pma->num_windows_per_cltp[index_cltp2]-1;//MIN(index_wd1+pma->non_diag,pma->num_windows-1);
        }
      }
    }
  }
  return _SUCCESS_;
}
int main(){
struct matters ma;
matter_init(&ma);
matter_free(&ma);
}
|
pi2.c | /*
* Copyright (c) 2009-2010, Oracle and/or its affiliates. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of Oracle nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
/*
*
*/
#define num_steps 200000000
double pi = 0;
/**
 * Estimate pi with the Leibniz series, pi/4 = 1 - 1/3 + 1/5 - 1/7 + ...
 * Counting by fours (i*4 = 0, 4, 8, ...) each iteration contributes
 * 1/(4i+1) - 1/(4i+3).
 */
int
main(int argc, char** argv) {
  int i;
  double start, stop;
#ifdef _OPENMP
  omp_set_num_threads(4);
  omp_set_dynamic(0);
#endif
  start = clock();
  // Bug fix: the reduction clause was commented out, so all threads
  // raced on the shared global `pi`, producing wrong, nondeterministic
  // results. reduction(+:pi) gives each thread a private accumulator
  // that is summed at the end of the region.
#pragma omp parallel for reduction(+:pi)
  for (i = 0; i < num_steps ; i++) {
    pi += 1.0/(i*4.0 + 1.0);
    pi -= 1.0/(i*4.0 + 3.0);
  }
  stop = clock();
  pi = pi * 4.0;
  // clock() counts CPU time in CLOCKS_PER_SEC units; use the macro
  // instead of the literal 1000000 (which is only correct on POSIX).
  // Note: with OpenMP this sums CPU time over all threads, not wall time.
  printf("pi done - %f in %.3f seconds\n", pi, (stop-start)/CLOCKS_PER_SEC);
  return (EXIT_SUCCESS);
}
|
COOSIMD32Tile.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_COOSIMD32TILE_H_
#define SRC_COOSIMD32TILE_H_
#include <string>
#include <algorithm>
#include <parallel/algorithm>
#include <vector>
#include "GMDP/utils/binary_search.h"
/**
 * Strict-weak ordering used to sort edges for the non-transposed
 * COOSIMD32 layout: lexicographic on (tile_id, dst, src).
 */
template <typename T>
bool compare_notrans_coosimd32(const tedge_t<T> & a, const tedge_t<T> & b)
{
  if (a.tile_id != b.tile_id) return a.tile_id < b.tile_id;
  if (a.dst != b.dst) return a.dst < b.dst;
  return a.src < b.src;
}
/* Bookkeeping for one 32-row sub-partition ("bin") during the round-robin
   SIMD edge assignment in the COOSIMD32Tile edge-list constructor. */
struct partition_bin_coosimd32 {
  int p32;       // index of the 32-row sub-partition this bin refers to
  int num_total; // total number of edges in the bin
  int num_left;  // edges not yet assigned (num_total - num_taken)
  int num_taken; // edges already moved into the SIMD-friendly prefix
};
template <typename T>
class COOSIMD32Tile {
public:
std::string name;
int m;
int n;
int nnz;
int num_partitions;
T* a; // nnz
int* ja; //nnz
int* ia; //nnz
int * partition_start; // num_partitions+1
int * simd_nnz; // num_partitions+1
// Serialize
friend boost::serialization::access;
// Boost.Serialization "save" half (split member): writes the scalar header
// fields, then -- only for non-empty tiles -- the a/ja/ia arrays and the two
// partition tables, element by element. The exact order written here IS the
// serialized format and must stay in sync with load() below.
template<class Archive>
void save(Archive& ar, const unsigned int version) const {
  ar & name;
  ar & m;
  ar & n;
  ar & nnz;
  ar & num_partitions;
  if(!isEmpty())
  {
    // edge values (nnz entries)
    for(int i = 0 ; i < nnz ; i++)
    {
      ar & a[i];
    }
    // destination indices (nnz entries)
    for(int i = 0 ; i < nnz ; i++)
    {
      ar & ja[i];
    }
    // source indices (nnz entries)
    for(int i = 0 ; i < nnz ; i++)
    {
      ar & ia[i];
    }
    // partition start offsets (num_partitions+1 entries)
    for(int i = 0 ; i < num_partitions+1 ; i++)
    {
      ar & partition_start[i];
    }
    // per-partition SIMD-friendly nnz counts (num_partitions+1 entries)
    for(int i = 0 ; i < num_partitions+1 ; i++)
    {
      ar & simd_nnz[i];
    }
  }
}
// Boost.Serialization "load" half (paired with save() above): restores the
// scalar header fields, then allocates the aligned arrays and reads them
// element by element in exactly the order save() wrote them.
template<class Archive>
void load(Archive& ar, const unsigned int version) {
  ar & name;
  ar & m;
  ar & n;
  ar & nnz;
  ar & num_partitions;
  if(!isEmpty())
  {
    // Consistency/overflow fix: size every allocation in 64-bit arithmetic.
    // The ia allocation previously multiplied nnz * sizeof(int) in plain
    // int, unlike a/ja, which can overflow for very large nnz.
    a = reinterpret_cast<T*>(
        _mm_malloc((uint64_t)nnz * (uint64_t)sizeof(T), 64));
    ja = reinterpret_cast<int*>(
        _mm_malloc((uint64_t)nnz * (uint64_t)sizeof(int), 64));
    ia = reinterpret_cast<int*>(
        _mm_malloc((uint64_t)nnz * (uint64_t)sizeof(int), 64));
    // NOTE(review): the edge-list constructor allocates partition_start and
    // simd_nnz with new[], while load() uses _mm_malloc -- whichever code
    // frees them must match the allocator; verify against the destructor.
    partition_start = reinterpret_cast<int*>(_mm_malloc((num_partitions+1) * sizeof(int), 64));
    simd_nnz = reinterpret_cast<int*>(_mm_malloc((num_partitions+1) * sizeof(int), 64));
    // edge values (nnz entries)
    for(int i = 0 ; i < nnz ; i++)
    {
      ar & a[i];
    }
    // destination indices (nnz entries)
    for(int i = 0 ; i < nnz ; i++)
    {
      ar & ja[i];
    }
    // source indices (nnz entries)
    for(int i = 0 ; i < nnz ; i++)
    {
      ar & ia[i];
    }
    // partition start offsets (num_partitions+1 entries)
    for(int i = 0 ; i < num_partitions+1 ; i++)
    {
      ar & partition_start[i];
    }
    // per-partition SIMD-friendly nnz counts (num_partitions+1 entries)
    for(int i = 0 ; i < num_partitions+1 ; i++)
    {
      ar & simd_nnz[i];
    }
  }
}
BOOST_SERIALIZATION_SPLIT_MEMBER()
// Empty-tile constructors. Robustness fix: num_partitions and all pointer
// members were previously left indeterminate, so any later read (e.g. by a
// destructor or serializer inspecting a never-filled tile) was UB; null
// them explicitly.
COOSIMD32Tile() : name("TEMP"), m(0), n(0), nnz(0), num_partitions(0),
                  a(nullptr), ja(nullptr), ia(nullptr),
                  partition_start(nullptr), simd_nnz(nullptr) {}
COOSIMD32Tile(int _m, int _n) : name("TEMP"), m(_m), n(_n), nnz(0), num_partitions(0),
                  a(nullptr), ja(nullptr), ia(nullptr),
                  partition_start(nullptr), simd_nnz(nullptr) {}
COOSIMD32Tile(edge_t<T>* edges, int _m, int _n, int _nnz, int row_start,
int col_start)
: name("TEMP"), m(_m), n(_n), nnz(_nnz) {
double stt = MPI_Wtime();
if (nnz > 0) {
a = reinterpret_cast<T*>(
_mm_malloc((uint64_t)nnz * (uint64_t)sizeof(T), 64));
ja = reinterpret_cast<int*>(
_mm_malloc((uint64_t)nnz * (uint64_t)sizeof(int), 64));
ia = reinterpret_cast<int*>(
_mm_malloc((uint64_t)nnz * (uint64_t)sizeof(int), 64));
tedge_t<T> * tmpedges = reinterpret_cast<tedge_t<T> *>( _mm_malloc(((uint64_t)nnz) * (uint64_t)sizeof(tedge_t<T>), 64));
num_partitions = omp_get_max_threads() * 4;
// Set partition IDs
#pragma omp parallel for
for(int edge_id = 0 ; edge_id < nnz ; edge_id++)
{
tmpedges[edge_id].src = edges[edge_id].src - row_start;
tmpedges[edge_id].dst = edges[edge_id].dst - col_start;
tmpedges[edge_id].val = edges[edge_id].val;
tmpedges[edge_id].tile_id = (tmpedges[edge_id].src-1) / 32;
}
// Sort
__gnu_parallel::sort(tmpedges, tmpedges+((uint64_t)nnz), compare_notrans_coosimd32<T>);
#pragma omp parallel for
for(int edge_id = 0 ; edge_id < nnz ; edge_id++)
{
ia[edge_id] = tmpedges[edge_id].src;
}
// Set partitions
num_partitions = omp_get_max_threads() * 4;
int rows_per_partition = (m + num_partitions - 1) / num_partitions;
rows_per_partition = ((rows_per_partition + 31) / 32) * 32;
partition_start = new int[num_partitions+1];
simd_nnz = new int[num_partitions+1];
int nnztotal = 0;
#pragma omp parallel for
for (int p = 0; p < num_partitions ; p++)
{
int start_row = p * rows_per_partition;
int end_row = (p+1) * rows_per_partition;
if(start_row > m) start_row = m;
if(end_row > m) end_row = m;
//int start_edge_id = l_binary_search(0, nnz, ia, start_row+1);
//int end_edge_id = l_binary_search(0, nnz, ia, end_row+1);
int start_edge_id = l_linear_search(0, nnz, ia, start_row+1);
int end_edge_id = l_linear_search(0, nnz, ia, end_row+1);
partition_start[p] = start_edge_id;
#ifdef __DEBUG
assert(start_edge_id == l_linear_search(0, nnz, ia, start_row+1));
assert(end_edge_id == l_linear_search(0, nnz, ia, end_row+1));
assert(start_edge_id >= 0);
#endif
int partition_nnz = end_edge_id - start_edge_id;
}
partition_start[num_partitions] = nnz;
// Create arrays
#pragma omp parallel for
for (int p = 0; p < num_partitions; p++)
{
int start_row = p * rows_per_partition;
int end_row = (p+1) * rows_per_partition;
if(start_row > m) start_row = m;
if(end_row > m) end_row = m;
int start_edge_id = partition_start[p];
int end_edge_id = partition_start[p+1];
int partition_nnz = end_edge_id - start_edge_id;
// For each 32 partition
int npartitions = ((end_row-start_row) + 31) / 32;
int * borders = new int[npartitions+1];
int current_partition = 0;
for(int eid = start_edge_id ; eid < end_edge_id ; eid++)
{
int new_partition = (ia[eid] - start_row - 1) / 32;
while(current_partition <= new_partition)
{
borders[current_partition] = eid;
current_partition++;
}
}
while(current_partition <= npartitions)
{
borders[current_partition] = end_edge_id;
current_partition++;
}
std::vector<partition_bin_coosimd32> bins = std::vector<partition_bin_coosimd32>(npartitions);
int n_full32 = 0;
for(int bin = 0 ; bin < npartitions ; bin++)
{
bins[bin].p32 = bin;
bins[bin].num_total = bins[bin].num_left = borders[bin+1]-borders[bin];
bins[bin].num_taken = 0;
if(bins[bin].num_total > 0) n_full32++;
}
// Sort bins for heuristic
std::sort(bins.begin(), bins.end(),
[](partition_bin_coosimd32 const & bin1, partition_bin_coosimd32 const & bin2) -> bool { return bin1.num_total > bin2.num_total; });
// Round robin assignment
int nnzsimd = 0;
while(n_full32 >= 32)
{
int rotation_count = 0;
for(int bin = 0 ; bin < npartitions ; bin++)
{
if(bins[bin].num_left > 0)
{
int eid = borders[bins[bin].p32] + bins[bin].num_taken;
bins[bin].num_taken++;
bins[bin].num_left--;
if(bins[bin].num_left == 0) n_full32--;
// Copy edge
ja[nnzsimd+partition_start[p]] = tmpedges[eid].dst;
ia[nnzsimd+partition_start[p]] = tmpedges[eid].src;
a[nnzsimd+partition_start[p]] = tmpedges[eid].val;
nnzsimd++;
rotation_count++;
}
if(rotation_count == 32) break;
}
}
simd_nnz[p] = nnzsimd;
int nnzincrement = nnzsimd;
for(int bin = 0 ; bin < npartitions ; bin++)
{
for(int taken_cnt = bins[bin].num_taken ; taken_cnt < bins[bin].num_total ; taken_cnt++)
{
int eid = borders[bins[bin].p32] + taken_cnt;
// Copy edge
ja[nnzincrement+partition_start[p]] = tmpedges[eid].dst;
ia[nnzincrement+partition_start[p]] = tmpedges[eid].src;
a[nnzincrement+partition_start[p]] = tmpedges[eid].val;
nnzincrement++;
}
}
if(nnzincrement != partition_nnz)
{
std::cout << "nnzincrement: " << nnzincrement << "\t partition_nnz: " << partition_nnz << std::endl;
exit(0);
}
assert(nnzincrement == (partition_start[p+1]-partition_start[p]));
delete [] borders;
}
#ifdef __DEBUG
unsigned int total_nnz_simd = 0;
for(int p = 0 ; p < num_partitions; p++)
{
total_nnz_simd += simd_nnz[p];
}
std::cout << "COOSIMD32 SIMD precentage: " << ((double) total_nnz_simd) / ((double)nnz) << std::endl;
// Check against edgelist
tedge_t<T> * check_edges = new tedge_t<T>[nnz];
for(int nzid = 0 ; nzid < nnz ; nzid++)
{
check_edges[nzid].dst = ja[nzid];
check_edges[nzid].src = ia[nzid];
check_edges[nzid].val = a[nzid];
check_edges[nzid].tile_id = (ia[nzid]-1) / 32;
}
__gnu_parallel::sort(check_edges, check_edges+nnz, compare_notrans_coosimd32<T>);
#pragma omp parallel
for(int i = 0 ; i < nnz ; i++)
{
assert(tmpedges[i].dst == check_edges[i].dst);
assert(tmpedges[i].src == check_edges[i].src);
//assert(tmpedges[i].val == check_edges[i].val); // commented now in case of duplicate edges
}
delete [] check_edges;
#pragma omp parallel for
for(int p = 0 ; p < num_partitions ; p++) {
assert(simd_nnz[p] % 32 == 0);
assert(simd_nnz[p] <= (partition_start[p+1] - partition_start[p]));
for(int i32 = 0 ; i32 < simd_nnz[p] ; i32+=32) {
for(int i = 0 ; i < 32 ; i++) {
for(int j = i+1 ; j < 32 ; j++) {
int partition_i = (ia[partition_start[p] + i32 + i]-1) / 32;
int partition_j = (ia[partition_start[p] + i32 + j]-1) / 32;
assert(partition_i != partition_j);
}
}
}
}
#endif // __DEBUG
//delete [] tmpedges;
_mm_free(tmpedges);
}
}
// Report whether this tile currently stores any nonzero entries.
bool isEmpty() const { return !(nnz > 0); }
// Export the tile's nonzeros into a flat edge list, translating the
// tile-local row/column indices back into global coordinates.
//
//   edges:     caller-allocated array with room for at least nnz entries (output).
//   row_start: global row offset of this tile, added back onto ia[].
//   col_start: global column offset of this tile, added back onto ja[].
//
// Fix: removed the unused local counter `nnzcnt` (it was initialized but
// never read or updated — dead code left over from an earlier version).
void get_edges(edge_t<T>* edges, int row_start, int col_start) {
    #pragma omp parallel for
    for (uint64_t i = 0; i < (uint64_t)nnz; i++) {
        edges[i].val = a[i];
        edges[i].dst = ja[i] + col_start;
        edges[i].src = ia[i] + row_start;
    }
}
// Shallow-copy assignment: the destination tile aliases the source tile's
// buffers (a/ia/ja/partition_start/simd_nnz) instead of duplicating them.
// NOTE(review): after assignment both tiles hold the same pointers, so the
// destructor running on both would free the buffers twice — presumably
// callers ensure only one of the two is ever destroyed; verify.
//
// Fix: the function is declared to return COOSIMD32Tile& but previously
// fell off the end without a return statement, which is undefined behavior
// in C++; it now returns *this as the signature promises.
COOSIMD32Tile& operator=(COOSIMD32Tile other) {
    this->name = other.name;
    this->m = other.m;
    this->n = other.n;
    this->nnz = other.nnz;
    this->a = other.a;
    this->ia = other.ia;
    this->ja = other.ja;
    this->num_partitions = other.num_partitions;
    this->partition_start = other.partition_start;
    this->simd_nnz = other.simd_nnz;
    return *this;
}
// Release the tile's buffers.  The value/index arrays are released with
// _mm_free, so they were presumably obtained with _mm_malloc (aligned
// allocation) — TODO confirm against the allocation site, which is outside
// this view.  The partition metadata arrays came from new[] (see the setup
// code above) and go back through delete[].  Empty tiles allocated nothing,
// so they have nothing to free.
~COOSIMD32Tile(void) {
if (!isEmpty()) {
_mm_free(a);
_mm_free(ja);
_mm_free(ia);
delete [] partition_start;
delete [] simd_nnz;
}
// Mark the tile empty so a subsequent destruction of a shallow copy
// sharing these pointers skips the frees above.
nnz = 0;
}
};
#endif // SRC_COOSIMD32TILE_H_
|
GB_unop__round_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__round_fp64_fp64)
// op(A') function: GB (_unop_tran__round_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = round (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = round (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = round (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ROUND || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ROUND unary operator elementwise: Cx [p] = round (Ax [p]) for
// all anz entries, in parallel over nthreads OpenMP threads.  Cx and Ax may
// alias (in-place apply).  When A is bitmap (Ab != NULL), only entries with
// Ab [p] nonzero are computed; C->b was already copied from A->b by the
// caller, so skipped positions need no update here.
// This file is auto-generated; do not hand-edit in Generated2/.
GrB_Info GB (_unop_apply__round_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// Operator disabled at compile time (GxB_NO_ROUND or GxB_NO_FP64):
// report no value so the caller falls back to the generic method.
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// Sparse/hyper/full case: every position 0..anz-1 holds an entry.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = round (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = round (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = round (A'): typecast (identity here, double->double), transpose, and
// apply the ROUND operator.  The actual bucket-transpose loop lives in the
// shared template GB_unop_transpose.c, which expands using the GB_CAST_OP
// and related macros defined at the top of this file.
// This file is auto-generated; do not hand-edit in Generated2/.
GrB_Info GB (_unop_tran__round_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// Operator disabled at compile time: defer to the generic method.
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_1x1_pack4to8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_pack4to8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack4to8, int inch, int outch)
{
// interleave
// src = inch-outch
// dst = 8b-4a-inch/4a-outch/8b
kernel_tm_pack4to8.create(8*4, inch / 4, outch / 8, (size_t)2u, 1);
int q = 0;
for (; q + 7 < outch; q += 8)
{
const float* k0 = (const float*)kernel + (q + 0) * inch;
const float* k1 = (const float*)kernel + (q + 1) * inch;
const float* k2 = (const float*)kernel + (q + 2) * inch;
const float* k3 = (const float*)kernel + (q + 3) * inch;
const float* k4 = (const float*)kernel + (q + 4) * inch;
const float* k5 = (const float*)kernel + (q + 5) * inch;
const float* k6 = (const float*)kernel + (q + 6) * inch;
const float* k7 = (const float*)kernel + (q + 7) * inch;
__fp16* g0 = kernel_tm_pack4to8.channel(q / 8);
for (int p = 0; p + 3 < inch; p += 4)
{
for (int i = 0; i < 4; i++)
{
g0[0] = (__fp16)k0[i];
g0[1] = (__fp16)k1[i];
g0[2] = (__fp16)k2[i];
g0[3] = (__fp16)k3[i];
g0[4] = (__fp16)k4[i];
g0[5] = (__fp16)k5[i];
g0[6] = (__fp16)k6[i];
g0[7] = (__fp16)k7[i];
g0 += 8;
}
k0 += 4;
k1 += 4;
k2 += 4;
k3 += 4;
k4 += 4;
k5 += 4;
k6 += 4;
k7 += 4;
}
}
}
// Stride-1 1x1 convolution as an SGEMM, pack4 fp16 input -> pack8 fp16
// output, fp16 arithmetic (aarch64).  Two phases:
//   1) re-tile the input into `tmp` in groups of 8 pixels (transposing the
//      pack4 channel data so the GEMM can read 8 pixels contiguously);
//   2) per output channel group, accumulate inch 4x8 weight tiles against
//      the re-tiled pixels with fused multiply-accumulate.
static void conv1x1s1_sgemm_pack4to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
// 1x1 stride-1 conv treats the spatial plane as a flat vector of pixels.
const int size = w * h;
const __fp16* bias = _bias;
// interleave
// tmp holds the input re-tiled by pixel group: full groups of 8 pixels
// first, then one single-pixel column per leftover pixel.
Mat tmp;
if (size >= 8)
tmp.create(8, inch, size / 8 + size % 8, elemsize, elempack, opt.workspace_allocator);
else // if (size >= 1)
tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
{
int nn_size;
int remain_size_start = 0;
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const __fp16* img0 = bottom_blob.channel(0);
img0 += i * 4;
__fp16* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
// transpose 4x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
// Step to the same 8 pixels in the next pack4 input channel.
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 3;
// Leftover pixels (size % 8): copy one pack4 pixel per tmp channel.
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const __fp16* img0 = bottom_blob.channel(0);
img0 += i * 4;
__fp16* tmpptr = tmp.channel(i / 8 + i % 8);
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.4h}, [%0] \n"
"st1 {v0.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
img0 += bottom_blob.cstep * 4;
}
}
}
// GEMM phase: each p computes one pack8 output channel group.
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* outptr0 = top_blob.channel(p);
const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
// bias + p*8 indexes the 8 biases of this output group; zero if absent.
const __fp16* biasptr = bias ? bias + p * 8 : zeros;
int i = 0;
for (; i + 7 < size; i += 8)
{
__fp16* tmpptr = tmp.channel(i / 8);
const __fp16* kptr = kernel.channel(p);
int nn = inch; // inch always > 0
// v16..v23: one 8-wide accumulator per pixel, seeded with the bias.
// Each loop iteration consumes 4 input channels (r0123) against the
// matching 8x4 weight tile (w0123).
asm volatile(
"ld1 {v16.8h}, [%8] \n"
"mov v17.16b, v16.16b \n"
"mov v18.16b, v16.16b \n"
"mov v19.16b, v16.16b \n"
"mov v20.16b, v16.16b \n"
"mov v21.16b, v16.16b \n"
"mov v22.16b, v16.16b \n"
"mov v23.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v0.h[1] \n"
"fmla v18.8h, v8.8h, v0.h[2] \n"
"fmla v19.8h, v8.8h, v0.h[3] \n"
"fmla v20.8h, v8.8h, v0.h[4] \n"
"fmla v21.8h, v8.8h, v0.h[5] \n"
"fmla v22.8h, v8.8h, v0.h[6] \n"
"fmla v23.8h, v8.8h, v0.h[7] \n"
"fmla v16.8h, v9.8h, v1.h[0] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"fmla v18.8h, v9.8h, v1.h[2] \n"
"fmla v19.8h, v9.8h, v1.h[3] \n"
"fmla v20.8h, v9.8h, v1.h[4] \n"
"fmla v21.8h, v9.8h, v1.h[5] \n"
"fmla v22.8h, v9.8h, v1.h[6] \n"
"fmla v23.8h, v9.8h, v1.h[7] \n"
"fmla v16.8h, v10.8h, v2.h[0] \n"
"fmla v17.8h, v10.8h, v2.h[1] \n"
"fmla v18.8h, v10.8h, v2.h[2] \n"
"fmla v19.8h, v10.8h, v2.h[3] \n"
"fmla v20.8h, v10.8h, v2.h[4] \n"
"fmla v21.8h, v10.8h, v2.h[5] \n"
"fmla v22.8h, v10.8h, v2.h[6] \n"
"fmla v23.8h, v10.8h, v2.h[7] \n"
"fmla v16.8h, v11.8h, v3.h[0] \n"
"fmla v17.8h, v11.8h, v3.h[1] \n"
"fmla v18.8h, v11.8h, v3.h[2] \n"
"fmla v19.8h, v11.8h, v3.h[3] \n"
"fmla v20.8h, v11.8h, v3.h[4] \n"
"fmla v21.8h, v11.8h, v3.h[5] \n"
"fmla v22.8h, v11.8h, v3.h[6] \n"
"fmla v23.8h, v11.8h, v3.h[7] \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
// Tail pixels: intrinsics version of the same accumulation, one pixel
// (one pack4 column of tmp) at a time.
for (; i < size; i++)
{
__fp16* tmpptr = tmp.channel(i / 8 + i % 8);
const __fp16* kptr = kernel.channel(p);
float16x8_t _sum0 = vld1q_f16(biasptr);
int q = 0;
for (; q < inch; q++)
{
float16x4_t _r0 = vld1_f16(tmpptr);
float16x8_t _k0 = vld1q_f16(kptr);
float16x8_t _k1 = vld1q_f16(kptr + 8);
float16x8_t _k2 = vld1q_f16(kptr + 16);
float16x8_t _k3 = vld1q_f16(kptr + 24);
_sum0 = vfmaq_lane_f16(_sum0, _k0, _r0, 0);
_sum0 = vfmaq_lane_f16(_sum0, _k1, _r0, 1);
_sum0 = vfmaq_lane_f16(_sum0, _k2, _r0, 2);
_sum0 = vfmaq_lane_f16(_sum0, _k3, _r0, 3);
kptr += 32;
tmpptr += 4;
}
vst1q_f16(outptr0, _sum0);
outptr0 += 8;
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const __fp16 bias0 = bias ? bias[p] : 0.f;
//
// __fp16* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// __fp16 sum = bias0;
//
// const __fp16* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const __fp16* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
static void conv1x1s2_pack4to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const __fp16* r0 = bottom_blob.channel(p);
__fp16* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
float16x4_t _v0 = vld1_f16(r0);
float16x4_t _v1 = vld1_f16(r0 + 8);
float16x4_t _v2 = vld1_f16(r0 + 16);
float16x4_t _v3 = vld1_f16(r0 + 24);
vst1_f16(outptr, _v0);
vst1_f16(outptr + 4, _v1);
vst1_f16(outptr + 8, _v2);
vst1_f16(outptr + 12, _v3);
r0 += 32;
outptr += 16;
}
for (; j + 1 < outw; j += 2)
{
float16x4_t _v0 = vld1_f16(r0);
float16x4_t _v1 = vld1_f16(r0 + 8);
vst1_f16(outptr, _v0);
vst1_f16(outptr + 4, _v1);
r0 += 16;
outptr += 8;
}
for (; j < outw; j++)
{
float16x4_t _v = vld1_f16(r0);
vst1_f16(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4to8_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
typedef enum
{
BitwiseAndAssignmentOperator = 0xd9U,
BitwiseOrAssignmentOperator,
LeftShiftAssignmentOperator,
RightShiftAssignmentOperator,
PowerAssignmentOperator,
ModuloAssignmentOperator,
PlusAssignmentOperator,
SubtractAssignmentOperator,
MultiplyAssignmentOperator,
DivideAssignmentOperator,
IncrementAssignmentOperator,
DecrementAssignmentOperator,
LeftShiftOperator,
RightShiftOperator,
LessThanEqualOperator,
GreaterThanEqualOperator,
EqualOperator,
NotEqualOperator,
LogicalAndOperator,
LogicalOrOperator,
ExponentialNotation
} FxOperator;
/*
  Evaluation state for one fx expression.
*/
struct _FxInfo
{
const Image
*images;        /* image sequence the expression is evaluated against */
char
*expression;    /* preprocessed expression string (see AcquireFxInfo) */
FILE
*file;          /* stream for debug output (initialized to stderr) */
SplayTreeInfo
*colors,        /* cached color lookups */
*symbols;       /* symbol table: name -> double* (see SetFxSymbolValue) */
CacheView
**view;         /* one virtual cache view per image in the sequence */
RandomInfo
*random_info;   /* random-number source */
ExceptionInfo
*exception;     /* exceptions raised during evaluation */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Allocate and initialize an FxInfo for the given image sequence and
  expression: exception/symbol-table/color-table state, one virtual cache
  view per image, then a textual preprocessing pass that rewrites each
  multi-character operator to a single-byte code (see FxOperator) and
  compacts whitespace.  Exits fatally if the view array cannot be allocated.
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
ExceptionInfo *exception)
{
const Image
*next;
FxInfo
*fx_info;
register ssize_t
i;
unsigned char
fx_op[2];
fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
(void) memset(fx_info,0,sizeof(*fx_info));
fx_info->exception=AcquireExceptionInfo();
fx_info->images=images;
fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
fx_info->images),sizeof(*fx_info->view));
if (fx_info->view == (CacheView **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/* One virtual cache view per image, indexed by list position. */
i=0;
next=GetFirstImageInList(fx_info->images);
for ( ; next != (Image *) NULL; next=next->next)
{
fx_info->view[i]=AcquireVirtualCacheView(next,exception);
i++;
}
fx_info->random_info=AcquireRandomInfo();
fx_info->expression=ConstantString(expression);
fx_info->file=stderr;
/*
Convert compound to simple operators.
*/
/*
fx_op is a one-character string: each substitution below replaces a
multi-character token with its single-byte FxOperator code.  Order
matters: longer tokens (e.g. "<<=") must be rewritten before their
prefixes (e.g. "<<").
*/
fx_op[1]='\0';
*fx_op=(unsigned char) BitwiseAndAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"&=",(char *) fx_op);
*fx_op=(unsigned char) BitwiseOrAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"|=",(char *) fx_op);
*fx_op=(unsigned char) LeftShiftAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"<<=",(char *) fx_op);
*fx_op=(unsigned char) RightShiftAssignmentOperator;
(void) SubstituteString(&fx_info->expression,">>=",(char *) fx_op);
*fx_op=(unsigned char) PowerAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"^=",(char *) fx_op);
*fx_op=(unsigned char) ModuloAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"%=",(char *) fx_op);
*fx_op=(unsigned char) PlusAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"+=",(char *) fx_op);
*fx_op=(unsigned char) SubtractAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"-=",(char *) fx_op);
*fx_op=(unsigned char) MultiplyAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"*=",(char *) fx_op);
*fx_op=(unsigned char) DivideAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"/=",(char *) fx_op);
*fx_op=(unsigned char) IncrementAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"++",(char *) fx_op);
*fx_op=(unsigned char) DecrementAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"--",(char *) fx_op);
*fx_op=(unsigned char) LeftShiftOperator;
(void) SubstituteString(&fx_info->expression,"<<",(char *) fx_op);
*fx_op=(unsigned char) RightShiftOperator;
(void) SubstituteString(&fx_info->expression,">>",(char *) fx_op);
*fx_op=(unsigned char) LessThanEqualOperator;
(void) SubstituteString(&fx_info->expression,"<=",(char *) fx_op);
*fx_op=(unsigned char) GreaterThanEqualOperator;
(void) SubstituteString(&fx_info->expression,">=",(char *) fx_op);
*fx_op=(unsigned char) EqualOperator;
(void) SubstituteString(&fx_info->expression,"==",(char *) fx_op);
*fx_op=(unsigned char) NotEqualOperator;
(void) SubstituteString(&fx_info->expression,"!=",(char *) fx_op);
*fx_op=(unsigned char) LogicalAndOperator;
(void) SubstituteString(&fx_info->expression,"&&",(char *) fx_op);
*fx_op=(unsigned char) LogicalOrOperator;
(void) SubstituteString(&fx_info->expression,"||",(char *) fx_op);
*fx_op=(unsigned char) ExponentialNotation;
(void) SubstituteString(&fx_info->expression,"**",(char *) fx_op);
/*
Force right-to-left associativity for unary negation.
*/
(void) SubstituteString(&fx_info->expression,"-","-1.0*");
/* Undo the rewrite where '-' was really part of exponent notation. */
(void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
(void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
(void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
(void) SubstituteString(&fx_info->expression," ",""); /* compact string */
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
/*
  Tear down an FxInfo acquired with AcquireFxInfo(): exception, expression
  string, both splay-trees, every per-image cache view (released in reverse
  order of acquisition), the view array itself, and the random source.
  Returns NULL so the caller can write fx_info=DestroyFxInfo(fx_info).
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
register ssize_t
i;
fx_info->exception=DestroyExceptionInfo(fx_info->exception);
fx_info->expression=DestroyString(fx_info->expression);
fx_info->symbols=DestroySplayTree(fx_info->symbols);
fx_info->colors=DestroySplayTree(fx_info->colors);
for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--)
fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% double FxEvaluateChannelExpression(FxInfo *fx_info,
% const PixelChannel channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% double FxEvaluateExpression(FxInfo *fx_info,
% double *alpha,Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Look up a symbol in the symbol table; returns NULL when the symbol has no
  recorded value.
*/
static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info,
  const char *symbol)
{
  const double
    *value;

  value=(const double *) GetValueFromSplayTree(fx_info->symbols,symbol);
  return(value);
}
/*
  Record a symbol's value in the symbol table.  An existing entry is updated
  in place; otherwise fresh storage is allocated and registered under a copy
  of the symbol name.  Returns MagickFalse (and raises a resource-limit
  exception) if the allocation fails.
*/
static inline MagickBooleanType SetFxSymbolValue(
  FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
  double const value)
{
  double
    *slot;

  slot=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (slot == (double *) NULL)
    {
      /*
        First assignment: allocate storage for the value and register it.
      */
      slot=(double *) AcquireMagickMemory(sizeof(*slot));
      if (slot == (double *) NULL)
        {
          (void) ThrowMagickException(fx_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            fx_info->images->filename);
          return(MagickFalse);
        }
      *slot=value;
      return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),slot));
    }
  *slot=value;
  return(MagickTrue);
}
/*
  Resolve an image-statistic symbol (depth, kurtosis, maxima, mean, median,
  minima, skewness, standard_deviation), optionally restricted to one pixel
  channel via a ".channel" suffix on the symbol.  Results are cached in the
  symbol table under a key unique to (image, channel, symbol) so each
  statistic is computed at most once per evaluation.  Returns the statistic
  scaled by QuantumScale (i.e. normalized to [0,1] for quantum-valued
  statistics).
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
ChannelType
channel_mask;
char
key[MagickPathExtent];
const double
*value;
double
statistic;
register const char
*p;
channel_mask=UndefinedChannel;
/* A '.' in the symbol introduces an explicit channel name, e.g. "mean.r". */
for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
if (*p == '.')
{
ssize_t
option;
option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
if (option >= 0)
{
/* Temporarily narrow the image's channel mask to the requested channel;
the previous mask is saved in channel_mask and restored below. */
channel=(PixelChannel) option;
channel_mask=SetPixelChannelMask(image,(ChannelType)
(1UL << channel));
}
}
/* Cache key: image address + channel number + symbol text. */
(void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
(double) channel,symbol);
value=GetFxSymbolValue(fx_info,key);
if (value != (const double *) NULL)
{
/* Cache hit: restore the channel mask and return the stored value. */
if (channel_mask != UndefinedChannel)
(void) SetPixelChannelMask(image,channel_mask);
return(QuantumScale*(*value));
}
/* Cache miss: compute the requested statistic.  The prefix comparisons
below are mutually exclusive, so at most one branch fires. */
statistic=0.0;
if (LocaleNCompare(symbol,"depth",5) == 0)
{
size_t
depth;
depth=GetImageDepth(image,exception);
statistic=(double) depth;
}
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
{
double
kurtosis,
skewness;
(void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
statistic=kurtosis;
}
if (LocaleNCompare(symbol,"maxima",6) == 0)
{
double
maxima,
minima;
(void) GetImageRange(image,&minima,&maxima,exception);
statistic=maxima;
}
if (LocaleNCompare(symbol,"mean",4) == 0)
{
double
mean,
standard_deviation;
(void) GetImageMean(image,&mean,&standard_deviation,exception);
statistic=mean;
}
if (LocaleNCompare(symbol,"median",6) == 0)
{
double
median;
(void) GetImageMedian(image,&median,exception);
statistic=median;
}
if (LocaleNCompare(symbol,"minima",6) == 0)
{
double
maxima,
minima;
(void) GetImageRange(image,&minima,&maxima,exception);
statistic=minima;
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
{
double
kurtosis,
skewness;
(void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
statistic=skewness;
}
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
{
double
mean,
standard_deviation;
(void) GetImageMean(image,&mean,&standard_deviation,exception);
statistic=standard_deviation;
}
/* Restore the caller's channel mask before caching and returning. */
if (channel_mask != UndefinedChannel)
(void) SetPixelChannelMask(image,channel_mask);
if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
return(0.0);
return(QuantumScale*statistic);
}
static double
FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
const ssize_t,const char *,const size_t,double *,ExceptionInfo *);
/*
  Return MagickTrue when the expression begins with the named function:
  the first `length` characters match `name` and the character that follows
  is either '(' or a non-whitespace character.
*/
static inline MagickBooleanType IsFxFunction(const char *expression,
  const char *name,const size_t length)
{
  int
    c;

  size_t
    i;

  /*
    The expression must extend past the candidate name so there is a
    character at expression[length] to inspect.
  */
  for (i=0; i <= length; i++)
    if (expression[i] == '\0')
      return(MagickFalse);
  if (LocaleNCompare(expression,name,length) != 0)
    return(MagickFalse);
  c=expression[length];
  if ((c == '(') || (isspace(c) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Greatest common divisor of alpha and beta (Euclid's algorithm, iterative).
*/
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  while (beta != 0)
  {
    MagickOffsetType
      residue;

    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
/*
  Scan forward from `expression` to the ')' that closes the current
  parenthesized subexpression (nesting depth returns to 1) and return a
  pointer to it.  If the string ends first, raise an unbalanced-parenthesis
  exception and return the pointer to the terminating NUL.
*/
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *p;

  register ssize_t
    depth;

  depth=0;
  for (p=expression; *p != '\0'; p++)
  {
    if ((depth == 1) && (*p == ')'))
      break;
    if (*p == '(')
      depth++;
    else
      if (*p == ')')
        depth--;
  }
  if (*p == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(p);
}
/*
  FxGetSymbol() resolves a symbolic reference inside an Fx expression and
  returns its numeric value.  A symbol may carry an image-selector prefix
  ('s' = current image, 'u' = first, 'v' = second, optionally with a
  bracketed index expression, e.g. u[2]) and a pixel-reference prefix
  ('p{x,y}' absolute or 'p[dx,dy]' relative).  The remaining name is then
  matched against pixel-channel shorthands (r, g, b, a, ...), image
  attributes (w, h, n, t, page.*, resolution.*, ...), channel statistics
  (mean, minima, skewness, ...), color functions (hue, luma, ...), and
  finally user-defined symbols and image artifacts.  Unknown symbols raise
  an OptionError and evaluate to 0.0.

  Channel intensities are scaled by QuantumScale so the result is
  normalized to [0,1]; geometric quantities (w, h, i, j, ...) are returned
  unscaled.
*/
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
  const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
  ExceptionInfo *exception)
{
  char
    *q,
    symbol[MagickPathExtent];

  const char
    *artifact,
    *p;

  const double
    *value;

  double
    alpha,
    beta;

  Image
    *image;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  /*
    A single-letter prefix (second character is not alphabetic) may select
    an image and/or a pixel location before the symbol name proper.
  */
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      char
        *subexpression;

      subexpression=AcquireString(expression);
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          /*
            Image selector: 's' (or anything else) = current image,
            'u' = image #0, 'v' = image #1.
          */
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /*
                Bracketed image index, e.g. u[n-1]: copy the (possibly
                nested) bracket contents and evaluate them as a
                subexpression to obtain the image index.
              */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              i=(ssize_t) alpha;
              if (*p != '\0')
                p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          /*
            Pixel reference: p{x,y} is an absolute position, p[dx,dy] is an
            offset relative to the current (x,y).  The coordinate pair is
            evaluated as one subexpression; the comma operator leaves the
            first value in alpha and the second in beta.
          */
          p++;
          if (*p == '{')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              if (*p != '\0')
                p++;
            }
          else
            if (*p == '[')
              {
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                if (*p != '\0')
                  p++;
              }
          if (*p == '.')
            p++;
        }
      subexpression=DestroyString(subexpression);
    }
  /*
    Fetch the selected image and interpolate the pixel at the requested
    (possibly fractional) point.
  */
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  i=GetImageIndexInList(image);
  GetPixelInfo(image,&pixel);
  status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  (void) status;
  /*
    A name of 3+ characters that is not one of the reserved color-function
    names may be a color name (e.g. "red.r"): strip a trailing ".channel"
    suffix and look it up in the color cache, then via QueryColorCompliance.
    On a hit the interpolated pixel is replaced by that color.
  */
  if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
      (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
      (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MagickPathExtent];

      size_t
        length;

      (void) CopyMagickString(name,p,MagickPathExtent);
      length=strlen(name);
      for (q=name+length-1; q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      q=name;
      if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
          (GetFxSymbolValue(fx_info,name) == (const double *) NULL))
        {
          PixelInfo
            *color;

          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=length;
            }
          else
            {
              MagickBooleanType
                status;

              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  /*
                    Cache the resolved color so subsequent pixels do not
                    re-query it.
                  */
                  (void) AddValueToSplayTree(fx_info->colors,
                    ConstantString(name),ClonePixelInfo(&pixel));
                  p+=length;
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  StripString(symbol);
  if (*symbol == '\0')
    {
      /*
        Bare pixel reference (e.g. "p[1,1]"): return the value of the
        channel currently being evaluated.
      */
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case CompositePixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        case IndexPixelChannel:
          return(0.0);
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /*
    Dispatch on the first character of the symbol name; unmatched names fall
    through to the user-symbol/artifact lookup below.
  */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (IsFxFunction(symbol,"channel",7) != MagickFalse)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          /*
            channel(r,g,b,a) / channel(c,m,y,k,a): the geometry argument
            maps positionally onto the channels of the image's colorspace;
            return the component that matches the channel being evaluated,
            or 0.0 when that position was not supplied.
          */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(symbol,"extent") == 0)
        {
          if (image->extent != 0)
            return((double) image->extent);
          return((double) GetBlobSize(image));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      /*
        "image.<statistic>" delegates to FxChannelStatistics with the
        "image." prefix (6 characters) stripped.
      */
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          /* Rec. 709 luma weights. */
          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;

          /* NOTE(review): same weights as "luma" above. */
          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"median",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      if (LocaleCompare(symbol,"printsize.x") == 0)
        return(PerceptibleReciprocal(image->resolution.x)*image->columns);
      if (LocaleCompare(symbol,"printsize.y") == 0)
        return(PerceptibleReciprocal(image->resolution.y)*image->rows);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double) GetImageDepth(image,fx_info->exception));
      break;
    }
    default:
      break;
  }
  /*
    Not a built-in: try a user-defined symbol, then an image artifact, and
    finally record the name as a defined-but-zero symbol so the error is
    reported only once.
  */
  value=GetFxSymbolValue(fx_info,symbol);
  if (value != (const double *) NULL)
    return(*value);
  artifact=GetImageArtifact(image,symbol);
  if (artifact != (const char *) NULL)
    return(StringToDouble(artifact,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UndefinedVariable","`%s'",symbol);
  (void) SetFxSymbolValue(fx_info,symbol,0.0);
  return(0.0);
}
/*
  FxOperatorPrecedence() scans `expression' left-to-right and returns a
  pointer to the operator at which the expression should be split for
  recursive evaluation, or NULL when no top-level operator is found.  The
  FxPrecedence enum is ordered from tightest binding (BitwiseComplement) to
  loosest (Separator); the scan keeps the loosest-binding operator seen so
  far, so the returned operator is the one evaluated last.  Unary/ternary/
  assignment operators keep the first (leftmost) occurrence so they
  associate right-to-left; all other operators keep the last occurrence
  (left-to-right associativity).  Text inside braces/brackets (level > 0)
  and inside parentheses (skipped via FxSubexpression) is ignored.

  `c' tracks the previous significant character; it starts at -1 and is
  also consulted to distinguish, e.g., a binary '-' from a unary '-'.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;

  size_t
    level;

  c=(-1);
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over multi-character function names and literals whose bodies
      would otherwise be misread as operators (e.g. the '2' in "atan2",
      the sign in "1E-5", hex color constants).
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (IsFxFunction(expression,"acosh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (IsFxFunction(expression,"asinh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (IsFxFunction(expression,"atanh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
        if (IsFxFunction(expression,"atan2",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit(c) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }
      /* no break: an 'e' that is not scientific notation falls through */
      case 'J':
      case 'j':
      {
        if ((IsFxFunction(expression,"j0",2) != MagickFalse) ||
            (IsFxFunction(expression,"j1",2) != MagickFalse))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        /*
          The default label is deliberately placed mid-switch (legal C): it
          detects implied multiplication, e.g. "2u" or ")(", where a digit
          or ')' is followed by an identifier, '(' or digit.
        */
        default:
        {
          if (((c != 0) && ((isdigit(c) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
               (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit(c) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /*
            '+'/'-' is binary only when the previous character ends an
            operand; otherwise it is a unary sign and not an operator.
          */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case BitwiseAndAssignmentOperator:
        case BitwiseOrAssignmentOperator:
        case LeftShiftAssignmentOperator:
        case RightShiftAssignmentOperator:
        case PowerAssignmentOperator:
        case ModuloAssignmentOperator:
        case PlusAssignmentOperator:
        case SubtractAssignmentOperator:
        case MultiplyAssignmentOperator:
        case DivideAssignmentOperator:
        case IncrementAssignmentOperator:
        case DecrementAssignmentOperator:
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
static double FxEvaluateSubexpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
const char *expression,const size_t depth,double *beta,
ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
subexpression=DestroyString(subexpression); \
return(value); \
}
#define FxParseConditional(subexpression,sentinal,p,q) \
{ \
p=subexpression; \
for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \
if (*q == '(') \
{ \
for (q++; (*q != ')') && (*q != '\0'); q++); \
if (*q == '\0') \
break; \
} \
if (*q == '\0') \
{ \
(void) ThrowMagickException(exception,GetMagickModule(), \
OptionError,"UnableToParseExpression","`%s'",subexpression); \
FxReturn(0.0); \
} \
if (strlen(q) == 1) \
*(q+1)='\0'; \
*q='\0'; \
}
char
*q,
*subexpression;
double
alpha,
gamma,
sans,
value;
register const char
*p;
*beta=0.0;
sans=0.0;
subexpression=AcquireString(expression);
*subexpression='\0';
if (depth > FxMaxSubexpressionDepth)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",expression);
FxReturn(0.0);
}
if (exception->severity >= ErrorException)
FxReturn(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
FxReturn(0.0);
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) (~(size_t) *beta);
FxReturn(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
depth+1,beta,exception));
FxReturn(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(PerceptibleReciprocal(*beta)*alpha);
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha-(*beta));
}
case BitwiseAndAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case BitwiseOrAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case RightShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PowerAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=pow(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ModuloAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=fmod(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PlusAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case SubtractAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case MultiplyAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DivideAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*PerceptibleReciprocal(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case IncrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DecrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
FxReturn(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
FxReturn(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case '?':
{
(void) CopyMagickString(subexpression,++p,MagickPathExtent-1);
FxParseConditional(subexpression,':',p,q);
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(gamma);
}
case '=':
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
beta,exception);
FxReturn(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
size_t
length;
if (depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
length=CopyMagickString(subexpression,expression+1,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
FxReturn(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (IsFxFunction(expression,"abs",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (IsFxFunction(expression,"acosh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(acosh(alpha));
}
#endif
if (IsFxFunction(expression,"acos",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"airy",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (IsFxFunction(expression,"asinh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(asinh(alpha));
}
#endif
if (IsFxFunction(expression,"asin",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(asin(alpha));
}
if (IsFxFunction(expression,"alt",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"atan2",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (IsFxFunction(expression,"atanh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atanh(alpha));
}
#endif
if (IsFxFunction(expression,"atan",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(expression,"ceil",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(ceil(alpha));
}
if (IsFxFunction(expression,"clamp",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha < 0.0)
FxReturn(0.0);
if (alpha > 1.0)
FxReturn(1.0);
FxReturn(alpha);
}
if (IsFxFunction(expression,"cosh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(cosh(alpha));
}
if (IsFxFunction(expression,"cos",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'D':
case 'd':
{
if (IsFxFunction(expression,"debug",5) != MagickFalse)
{
const char
*type;
size_t
length;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
switch (fx_info->images->colorspace)
{
case CMYKColorspace:
{
switch (channel)
{
case CyanPixelChannel: type="cyan"; break;
case MagentaPixelChannel: type="magenta"; break;
case YellowPixelChannel: type="yellow"; break;
case AlphaPixelChannel: type="alpha"; break;
case BlackPixelChannel: type="black"; break;
default: type="unknown"; break;
}
break;
}
case GRAYColorspace:
{
switch (channel)
{
case RedPixelChannel: type="gray"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
default:
{
switch (channel)
{
case RedPixelChannel: type="red"; break;
case GreenPixelChannel: type="green"; break;
case BluePixelChannel: type="blue"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
}
*subexpression='\0';
length=1;
if (strlen(expression) > 6)
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
"%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),alpha);
FxReturn(alpha);
}
if (IsFxFunction(expression,"do",2) != MagickFalse)
{
size_t
length;
/*
Parse do(expression,condition test).
*/
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
}
FxReturn(alpha);
}
if (IsFxFunction(expression,"drc",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (IsFxFunction(expression,"erf",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(erf(alpha));
}
#endif
if (IsFxFunction(expression,"exp",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
FxReturn(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (IsFxFunction(expression,"floor",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"for",3) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
/*
Parse for(initialization, condition test, expression).
*/
length=CopyMagickString(subexpression,expression+4,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
}
FxReturn(alpha);
}
break;
}
case 'G':
case 'g':
{
if (IsFxFunction(expression,"gauss",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI));
}
if (IsFxFunction(expression,"gcd",3) != MagickFalse)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
0.5));
FxReturn((double) gcd);
}
if (LocaleCompare(expression,"g") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleCompare(expression,"hue") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"hypot",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'I':
case 'i':
{
if (IsFxFunction(expression,"if",2) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
if (fabs(alpha) >= MagickEpsilon)
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(alpha);
}
if (LocaleCompare(expression,"intensity") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"int",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"isnan",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (IsFxFunction(expression,"j0",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"j1",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"jinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha)));
}
#endif
break;
}
case 'L':
case 'l':
{
if (IsFxFunction(expression,"ln",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(log(alpha));
}
if (IsFxFunction(expression,"logtwo",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn(log10(alpha)/log10(2.0));
}
if (IsFxFunction(expression,"log",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
FxReturn(QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (IsFxFunction(expression,"max",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (IsFxFunction(expression,"min",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha < *beta ? alpha : *beta);
}
if (IsFxFunction(expression,"mod",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta));
}
if (LocaleCompare(expression,"m") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'N':
case 'n':
{
if (IsFxFunction(expression,"not",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
FxReturn(1.0);
if (LocaleCompare(expression,"o") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
FxReturn(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
FxReturn(MagickPI);
if (IsFxFunction(expression,"pow",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
FxReturn(QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
FxReturn(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (IsFxFunction(expression,"rand",4) != MagickFalse)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
FxReturn(alpha);
}
if (IsFxFunction(expression,"round",5) != MagickFalse)
{
/*
Round the fraction to nearest integer.
*/
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if ((alpha-floor(alpha)) < (ceil(alpha)-alpha))
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"r") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"sign",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(alpha < 0.0 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"sinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0)
FxReturn(1.0);
FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha));
}
if (IsFxFunction(expression,"sinh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sinh(alpha));
}
if (IsFxFunction(expression,"sin",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(sin(alpha));
}
if (IsFxFunction(expression,"sqrt",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sqrt(alpha));
}
if (IsFxFunction(expression,"squish",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'T':
case 't':
{
if (IsFxFunction(expression,"tanh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(tanh(alpha));
}
if (IsFxFunction(expression,"tan",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
FxReturn(0.0);
if (IsFxFunction(expression,"trunc",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha >= 0.0)
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'W':
case 'w':
{
if (IsFxFunction(expression,"while",5) != MagickFalse)
{
size_t
length;
/*
Parse while(condition test, expression).
*/
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,
beta,exception);
}
FxReturn(alpha);
}
if (LocaleCompare(expression,"w") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
default:
break;
}
subexpression=DestroyString(subexpression);
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception);
FxReturn(alpha);
}
/*
  FxEvaluateExpression(): evaluate the fx expression for the gray channel at
  pixel (0,0).  Thin convenience wrapper around FxEvaluateChannelExpression().
*/
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  return(FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception));
}
/*
  FxPreprocessExpression(): evaluate the expression once with debug output
  suppressed (fx_info->file temporarily set to NULL) so parse errors surface
  before any per-pixel evaluation begins.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *saved_file;

  MagickBooleanType
    status;

  saved_file=fx_info->file;  /* silence debug() output during the dry run */
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=saved_file;
  return(status);
}
/*
  FxEvaluateChannelExpression(): evaluate the parsed fx expression for one
  channel at pixel (x,y), storing the result in *alpha.  Returns MagickFalse
  only when evaluation raised an OptionError exception.
*/
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    beta = 0.0;  /* secondary operand slot used by two-argument functions */

  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &beta,exception);
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyFxThreadSet(): release the per-thread FxInfo array created by
  AcquireFxThreadSet().  NULL entries (left by a partially constructed set)
  are skipped.  Returns NULL for convenient assignment at the call site.
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  assert(fx_info != (FxInfo **) NULL);
  /* hoist the resource query out of the loop: the limit is loop-invariant */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
  AcquireFxThreadSet(): build one pre-parsed FxInfo per worker thread so the
  parallel pixel loop in FxImage() never shares parser state.  An expression
  of the form "@filename" is first read from that file.  Returns NULL (with
  'exception' set) if allocation, the file read, or expression preprocessing
  fails; on partial failure all completed entries are released.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) memset(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        FileToString() can fail (e.g. unreadable @filename); without this
        check a NULL expression would be handed to AcquireFxInfo() below.
      */
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /* parse once now so per-pixel evaluation cannot hit syntax errors */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);  /* partial failure: release all */
  return(fx_info);
}
/*
  FxImage() clones 'image' and overwrites each updatable channel of the clone
  with the clamped result of evaluating the fx expression at that pixel.  A
  NULL expression degenerates to a plain clone.  Rows are evaluated in
  parallel; each OpenMP thread uses its own FxInfo from the thread set so no
  parser state is shared.  Returns NULL on failure (exception holds details).
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* one pre-parsed FxInfo per worker thread */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  /* pixels are written directly below, so the clone must be DirectClass */
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's private FxInfo */

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        if ((fx_traits & CopyPixelTrait) != 0)
          {
            /* channel is not updatable: copy the source value verbatim */
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        /* expression results are normalized; scale to quantum and clamp */
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_uint64_uint64
// op(A') function: GB_unop_tran__lnot_uint64_uint64
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for all p, in parallel.  For this operator the
// cast uint64_t -> uint64_t is the identity, and cij = !(aij != 0) is
// equivalent to (aij == 0), so the kernel folds both into one expression.
GrB_Info GB_unop_apply__lnot_uint64_uint64
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator/type disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // cij = !(aij != 0), i.e. 1 iff aij is zero
        Cx [p] = (uint64_t) (Ax [p] == 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose loop body lives in the shared template GB_unop_transpose.c,
// expanded here with the GB_* macros defined at the top of this file
// (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...).  This wrapper only selects phase 2
// and supplies the slicing arguments consumed by the template.
GrB_Info GB_unop_tran__lnot_uint64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // operator/type disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
#ifndef MiscLib__REFCOUNTED_HEADER__
#define MiscLib__REFCOUNTED_HEADER__
#ifdef DOPARALLEL
#include <omp.h>
#endif
namespace MiscLib
{
// RefCounted<T>: intrusive reference-counting mixin.  Publicly derives from
// T and adds AddRef()/Release(); a new object starts with a count of 1 and
// deletes itself when the count reaches zero via Release().
template< class T >
class RefCounted
: public T
{
public:
	RefCounted()
	: m_refCount(1)
	{}

	// Copying yields an independent object with its own fresh count of 1.
	RefCounted(const RefCounted< T > &r)
	: T(r)
	, m_refCount(1)
	{
		// do not copy the ref count!
	}

	// Increment the count; returns the new count.
	// NOTE(review): the return value is read after the (possibly atomic)
	// increment and may be stale under concurrent use — treat it as
	// informational only; confirm callers do not rely on it being exact.
	unsigned int AddRef() const
	{
#ifdef DOPARALLEL
#pragma omp atomic
#endif
		++m_refCount;
		return m_refCount;
	}

	// Decrement the count; deletes the object (and returns 0) when the
	// count drops to zero.
	// NOTE(review): the unsynchronized m_refCount == 1 pre-check races with
	// concurrent AddRef()/Release() under DOPARALLEL — verify callers never
	// add and release references concurrently when the count is near 1.
	unsigned int Release() const
	{
		if(m_refCount == 1)
		{
#ifdef DOPARALLEL
#pragma omp critical
#endif
			{
				// re-check inside the critical section so only one
				// thread performs the delete
				if(m_refCount)
				{
					m_refCount = 0;
					delete this;
				}
			}
			return 0;
		}
#ifdef DOPARALLEL
#pragma omp atomic
#endif
		--m_refCount;
		return m_refCount;
	}

	// Assigns only the T part; the ref count is deliberately untouched so
	// existing references to *this stay valid.
	RefCounted &operator=(const RefCounted &r)
	{
		*((T *)this) = r;
		return *this; // do not copy the ref count!
	}

protected:
	// Protected virtual dtor: destruction only happens through Release().
	virtual ~RefCounted()
	{}

private:
	mutable unsigned int m_refCount; // mutable so const objects can be shared
};
};
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.