source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
shortcut_layer.c | #include "shortcut_layer.h"
#include "dark_cuda.h"
#include "blas.h"
#include <stdio.h>
#include <assert.h>
// Build a shortcut (residual) layer: forward pass adds the output of the
// layer at `index` onto this layer's input, then applies the activation.
// (w, h, c)   — output shape of this layer (shape of the incoming tensor).
// (w2, h2, c2)— shape of the layer being skipped from (layer `index`).
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
    fprintf(stderr,"Shortcut Layer: %d\n", index);
    layer l = { (LAYER_TYPE)0 };
    l.type = SHORTCUT;
    l.batch = batch;
    // Input side carries the skipped-from layer's shape.
    l.w = w2;
    l.h = h2;
    l.c = c2;
    // Output side carries the incoming tensor's shape.
    l.out_w = w;
    l.out_h = h;
    l.out_c = c;
    l.outputs = w*h*c;
    l.inputs = l.outputs;
    // Diagnostic for mismatched shapes (shortcut_cpu/gpu resamples them).
    // BUG FIX: the original condition compared c with itself (`c != c`),
    // which is always false, so a channel mismatch was never reported;
    // it must compare c against c2.
    if(w != w2 || h != h2 || c != c2) fprintf(stderr, " w = %d, w2 = %d, h = %d, h2 = %d, c = %d, c2 = %d \n", w, w2, h, h2, c, c2);
    l.index = index;
    l.delta = (float*)calloc(l.outputs * batch, sizeof(float));
    l.output = (float*)calloc(l.outputs * batch, sizeof(float));
    l.forward = forward_shortcut_layer;
    l.backward = backward_shortcut_layer;
#ifdef GPU
    l.forward_gpu = forward_shortcut_layer_gpu;
    l.backward_gpu = backward_shortcut_layer_gpu;
    l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
    l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
#endif
    return l;
}
// Resize an existing shortcut layer to a new spatial size (w, h) and
// reallocate its host (and GPU) buffers accordingly.
// NOTE(review): realloc results are assigned straight back to the pointers;
// on allocation failure the old buffers leak and the fields become NULL.
// This matches the file's existing allocation style — confirm upstream
// treats OOM as fatal before changing it.
void resize_shortcut_layer(layer *l, int w, int h)
{
    //assert(l->w == l->out_w);
    //assert(l->h == l->out_h);
    // Input and output spatial dims are kept equal on resize.
    l->w = l->out_w = w;
    l->h = l->out_h = h;
    l->outputs = w*h*l->out_c;
    l->inputs = l->outputs;
    l->delta = (float*)realloc(l->delta, l->outputs * l->batch * sizeof(float));
    l->output = (float*)realloc(l->output, l->outputs * l->batch * sizeof(float));
#ifdef GPU
    // GPU buffers cannot be resized in place: free and re-create them,
    // seeded from the (freshly reallocated) host arrays.
    cuda_free(l->output_gpu);
    cuda_free(l->delta_gpu);
    l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
    l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
#endif
}
// Forward pass: element-wise add the output of layer l.index onto the input,
// then apply this layer's activation in place.
void forward_shortcut_layer(const layer l, network_state state)
{
    const int total = l.outputs * l.batch;
    const float* from = state.net.layers[l.index].output;

    if (l.w != l.out_w || l.h != l.out_h || l.c != l.out_c) {
        // Shapes differ: copy the input, then let shortcut_cpu perform the
        // strided/sampled addition between the two shapes.
        copy_cpu(total, state.input, 1, l.output, 1);
        shortcut_cpu(l.batch, l.w, l.h, l.c, from, l.out_w, l.out_h, l.out_c, l.output);
    }
    else {
        // Identical shapes: a plain element-wise sum suffices.
        int i;
        #pragma omp parallel for
        for (i = 0; i < total; ++i) {
            l.output[i] = state.input[i] + from[i];
        }
    }

    activate_array(l.output, total, l.activation);
}
// Backward pass: convert the output gradient through the activation, then
// route it both to the incoming delta and to the skipped-from layer's delta.
void backward_shortcut_layer(const layer l, network_state state)
{
    const int n = l.outputs * l.batch;
    // l.delta = dL/d(pre-activation), derived from l.output.
    gradient_array(l.output, n, l.activation, l.delta);
    // Propagate to the direct input...
    axpy_cpu(n, 1, l.delta, 1, state.delta, 1);
    // ...and to the layer the shortcut skips from (shape-adapted).
    shortcut_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta, l.w, l.h, l.c, state.net.layers[l.index].delta);
}
#ifdef GPU
// GPU forward pass: fused input-plus-shortcut addition, then activation.
void forward_shortcut_layer_gpu(const layer l, network_state state)
{
    // input_shortcut_gpu fuses the copy of state.input with the addition of
    // the skipped layer's output (replaces a copy_ongpu + shortcut_gpu pair).
    input_shortcut_gpu(state.input, l.batch, l.w, l.h, l.c,
                       state.net.layers[l.index].output_gpu,
                       l.out_w, l.out_h, l.out_c, l.output_gpu);
    activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
}
// GPU backward pass: mirror of backward_shortcut_layer on device buffers.
void backward_shortcut_layer_gpu(const layer l, network_state state)
{
    const int n = l.outputs * l.batch;
    gradient_array_ongpu(l.output_gpu, n, l.activation, l.delta_gpu);
    // Gradient flows both to the direct input and the skipped-from layer.
    axpy_ongpu(n, 1, l.delta_gpu, 1, state.delta, 1);
    shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, state.net.layers[l.index].delta_gpu);
}
#endif
|
GB_binop__land_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_int32
// A.*B function (eWiseMult): GB_AemultB__land_int32
// A*D function (colscale): GB_AxD__land_int32
// D*A function (rowscale): GB_DxB__land_int32
// C+=B function (dense accum): GB_Cdense_accumB__land_int32
// C+=b function (dense accum): GB_Cdense_accumb__land_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_int32
// C=scalar+B GB_bind1st__land_int32
// C=scalar+B' GB_bind1st_tran__land_int32
// C=A+scalar GB_bind2nd__land_int32
// C=A'+scalar GB_bind2nd_tran__land_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT32 || GxB_NO_LAND_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense. All computation lives in the
// shared template; the GB_BINOP macro above supplies the int32 logical-AND.
GrB_Info GB_Cdense_ewise3_noaccum__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // OpenMP thread count for the template
)
{
    #if GB_DISABLE
    // This operator/type combination was compiled out (see GB_DISABLE).
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C. The *_slice arrays partition
// B's entries into ntasks parallel tasks (produced by GB_ek_slice).
GrB_Info GB_Cdense_accumB__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. p_bwork points to the
// scalar, typed int32_t below.
GrB_Info GB_Cdense_accumb__land_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // FIX: a duplicate return inside the scalar scope made the final return
    // unreachable; a single return now follows the template (matching the
    // sibling GB_Cdense_accumB function above).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D. The slice
// arrays partition A's entries into ntasks parallel tasks.
GrB_Info GB_AxD__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is C's value array, viewed with the concrete type for this kernel.
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is C's value array, viewed with the concrete type for this kernel.
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and B.
// TaskList/ntasks partition the work; the C_to_* maps relate C's vectors to
// those of M, A, and B.
GrB_Info GB_AaddB__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,     // use only M's structure, not its values
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the patterns
// of A and B.
GrB_Info GB_AemultB__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,     // use only M's structure, not its values
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = land (x, Bx [p]) for all anz entries, with scalar x bound first.
GrB_Info GB_bind1st__land_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Bx = (int32_t *) Bx_input ;
    // The test on x is loop-invariant, so hoist it out of the loop.
    const int32_t x_nonzero = ((*((int32_t *) x_input)) != 0) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (x_nonzero && (Bx [p] != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = land (Ax [p], y) for all anz entries, with scalar y bound second.
GrB_Info GB_bind2nd__land_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    // The test on y is loop-invariant, so hoist it out of the loop.
    const int32_t y_nonzero = ((*((int32_t *) y_input)) != 0) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = ((Ax [p] != 0) && y_nonzero) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = op (x, A'): transpose A while applying the operator with scalar x
// bound as the first argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__land_int32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // Re-assert the file-wide GB_ATYPE (same value here, since the x and a
    // types coincide for this operator).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = op (A', y): transpose A while applying the operator with scalar y
// bound as the second argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__pow_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__pow_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__pow_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_int16)
// C=scalar+B GB (_bind1st__pow_int16)
// C=scalar+B' GB (_bind1st_tran__pow_int16)
// C=A+scalar GB (_bind2nd__pow_int16)
// C=A'+scalar GB (_bind2nd_tran__pow_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_pow_int16 (aij, bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_pow_int16 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_INT16 || GxB_NO_POW_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; "add" here is the pow operator
// per the GB_BINOP macro above.
GrB_Info GB (_Cdense_ewise3_noaccum__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // OpenMP thread count for the template
)
{
    #if GB_DISABLE
    // This operator/type combination was compiled out (see GB_DISABLE).
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C. B_ek_slicing partitions B's
// entries into B_ntasks parallel tasks.
GrB_Info GB (_Cdense_accumB__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. p_bwork points to the
// scalar, typed int16_t below.
GrB_Info GB (_Cdense_accumb__pow_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // FIX: a duplicate return inside the scalar scope made the final return
    // unreachable; a single return now follows the template (matching the
    // sibling GB (_Cdense_accumB...) function above).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled generated stub: this operator has no rowscale kernel.
// FIX: the placeholder name read "GB ((node))" — a typo for "GB ((none))",
// the convention used by the matching colscale stub above.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and B.
GrB_Info GB (_AaddB__pow_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,     // use only M's structure, not its values
    const bool Mask_comp,       // complement the mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Slicing workspace consumed by the template; GB_FREE_WORK releases it.
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B over the pattern
// intersection of A and B.
GrB_Info GB (_AemultB_01__pow_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,     // use only M's structure, not its values
    const bool Mask_comp,       // complement the mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. Because pow is not commutative (GB_BINOP_FLIP is 1), the
// flipxy flag selects between fmult(x,y) and fmult(y,x).
GrB_Info GB (_AemultB_02__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,          // if true, apply the operator as f(y,x)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full.
GrB_Info GB (_AemultB_03__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,     // use only M's structure, not its values
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__pow_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,     // use only M's structure, not its values
    const bool Mask_comp,       // complement the mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = pow (x, Bx [p]) for every present entry, with x bound first.
GrB_Info GB (_bind1st__pow_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // entry bitmap; GBB selects present entries
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Bx = (int16_t *) Bx_input ;
    const int16_t x = (*((int16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Bb, p))
        {
            Cx [p] = GB_pow_int16 (x, Bx [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = pow (Ax [p], y) for every present entry, with y bound second.
GrB_Info GB (_bind2nd__pow_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // entry bitmap; GBB selects present entries
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    const int16_t y = (*((int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            Cx [p] = GB_pow_int16 (Ax [p], y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = GB_pow_int16 (x, aij) ; \
}
// C = op (x, A'): transpose A while applying pow with scalar x bound as the
// first argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__pow_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // Re-assert the file-wide GB_ATYPE (same value here, since the x and a
    // types coincide for this operator).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = GB_pow_int16 (aij, y) ; \
}
// C = op (A', y): transpose A while applying pow with scalar y bound as the
// second argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
deconvolution_pack1to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transposed convolution ("deconvolution") from an unpacked (pack1) input
// blob to a pack4 output blob, using MIPS MSA vector intrinsics.
// Formulated as a gather: for each output pixel, sum the input pixels whose
// forward-convolution stride pattern would have written to it.
// Assumes weight_data_pack1ton holds, per output-channel group p, maxk *
// channels weight vectors of 4 floats — TODO confirm against the packer.
static void deconvolution_pack1to4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack1ton, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Effective kernel span once dilation is applied.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Accumulator for one pack4 output pixel: bias or zero.
                v4f32 _sum = (v4f32)__msa_fill_w(0);

                if (bias_data_ptr)
                {
                    _sum = (v4f32)__msa_ld_w((const float*)bias_data_ptr + p * 4, 0);
                }

                // Weights for output-channel group p.
                const float* kptr = (const float*)weight_data_pack1ton + maxk * channels * p * 4;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // An input row contributes only when the offset is
                        // non-negative and divisible by the stride.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        const float* sptr = m.row(sy);

                        for (int x = 0; x < kernel_w; x++)
                        {
                            // Same divisibility/bounds test for the column.
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            float val = sptr[sx];

                            int k = y * kernel_w + x;

                            // Broadcast the scalar input and fused
                            // multiply-add against the 4-wide weight vector.
                            v4f32 _val = (v4f32)__msa_fill_w_f32(val);
                            v4f32 _w = (v4f32)__msa_ld_w(kptr + k * 4, 0);
                            _sum = __msa_fmadd_w(_sum, _val, _w);
                        }
                    }

                    // Advance to the next input channel's weights.
                    kptr += maxk * 4;
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                __msa_st_w((v4i32)_sum, outptr + j * 4, 0);
            }

            outptr += outw * 4;
        }
    }
}
|
GB_unaryop__lnot_int32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int32_int16
// op(A') function: GB_tran__lnot_int32_int16
// C type: int32_t
// A type: int16_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !(Ax [p] != 0), casting int16 -> int32; this writes out the
// GB_CAST_OP macro expansion explicitly instead of invoking the macro.
GrB_Info GB_unop__lnot_int32_int16
(
    int32_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;
        // cast, then apply the logical-NOT operator
        int32_t z = (int32_t) aij ;
        Cx [p] = !(z != 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast int16 -> int32, and apply the
// logical-NOT operator, all via the shared transpose template.
GrB_Info GB_tran__lnot_int32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__minv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__minv_int8_int8
// op(A') function: GB_unop_tran__minv_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
// type of the entries of A (the input matrix)
#define GB_ATYPE \
int8_t
// type of the entries of C (the output matrix); same as A here
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// GB_CX (p): access the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
// Full per-entry body used by the apply loop and the transpose template.
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 8) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [k] = GB_IMINV_SIGNED (Ax [k], 8) for every one of the anz entries:
// the signed 8-bit integer multiplicative-inverse operator, applied in
// parallel over two dense value arrays (which may alias each other).
GrB_Info GB_unop_apply__minv_int8_int8
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// copy the entry into a local first, since the macro may read its
// argument more than once and Cx/Ax can alias
int8_t t = Ax [k] ;
Cx [k] = GB_IMINV_SIGNED (t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): transpose A and apply the signed int8 multiplicative
// inverse operator.  The body is the included transpose template, which
// expands the GB_* macros defined above.
GrB_Info GB_unop_tran__minv_int8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__lor_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_int32)
// A*D function (colscale): GB (_AxD__lor_int32)
// D*A function (rowscale): GB (_DxB__lor_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_int32)
// C=scalar+B GB (_bind1st__lor_int32)
// C=scalar+B' GB (_bind1st_tran__lor_int32)
// C=A+scalar GB (_bind2nd__lor_int32)
// C=A'+scalar GB (_bind2nd_tran__lor_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) || (bij != 0))
// type of the entries of A (first input)
#define GB_ATYPE \
int32_t
// type of the entries of B (second input)
#define GB_BTYPE \
int32_t
// type of the entries of C (output)
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (no trailing line-continuation here: a dangling backslash after the 0
// would splice the following source line into this macro definition)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// GB_CX (p): access the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]
// binary operator (logical OR over int32 operands; i,j are unused here)
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_INT32 || GxB_NO_LOR_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// This kernel is compiled out (#if 0) for LOR: the dense C += A+B method
// only exists for the accumulable arithmetic operators listed below.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop lives in the
// included template, which expands the GB_* macros defined above.
// NOTE(review): unlike the other kernels in this file there is no
// GB_DISABLE guard here — presumably the caller checks; confirm upstream.
void GB (_Cdense_ewise3_noaccum__lor_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, where C is dense and B is sparse; B has been pre-sliced into
// B_ntasks tasks (B_ek_slicing) for B_nthreads threads.
GrB_Info GB (_Cdense_accumB__lor_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a single scalar (passed as untyped p_bwork) into
// every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__lor_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the inner block above always returns (generated artifact)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D where D is diagonal: scale each column j of A by D(j,j).
// A has been pre-sliced into A_ntasks tasks for A_nthreads threads.
GrB_Info GB (_AxD__lor_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
// the template writes the results directly into C's value array
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B where D is diagonal: scale each row i of B by D(i,i).
GrB_Info GB (_DxB__lor_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
// the template writes the results directly into C's value array
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with the LOR int32
// operator applied where both entries are present.  For eWiseUnion, the
// alpha/beta scalars substitute for missing entries of A and B.
GrB_Info GB (_AaddB__lor_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only assigned for eWiseUnion; presumably the template
// only reads them in that case — confirm against GB_add_template.c
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is computed
// as sparse or hypersparse, using the pre-built task list.
GrB_Info GB (_AemultB_08__lor_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full.  Since GB_BINOP_FLIP is 0 for LOR (commutative), only the
// non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__lor_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full; M has been pre-sliced into M_ntasks tasks.
GrB_Info GB (_AemultB_04__lor_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__lor_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = (x || Bx [k]) for every entry present in B: apply the LOR
// operator with the scalar x bound as the first argument.  Bb is B's
// bitmap of present entries (may be NULL for a full matrix).
GrB_Info GB (_bind1st__lor_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Bx = (int32_t *) Bx_input ;
int32_t x = (*((int32_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only touch positions present in B's bitmap
if (GBB (Bb, k))
{
int32_t bval = GBX (Bx, k, false) ;
Cx [k] = ((x != 0) || (bval != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = (Ax [k] || y) for every entry present in A: apply the LOR
// operator with the scalar y bound as the second argument.  Ab is A's
// bitmap of present entries (may be NULL for a full matrix).
GrB_Info GB (_bind2nd__lor_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only touch positions present in A's bitmap
if (GBB (Ab, k))
{
int32_t aval = GBX (Ax, k, false) ;
Cx [k] = ((aval != 0) || (y != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined here so the shared transpose template applies
// the binary operator with the scalar x bound as the first argument.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply LOR with scalar x as 1st argument.
GrB_Info GB (_bind1st_tran__lor_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (generated boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined here so the shared transpose template applies
// the binary operator with the scalar y bound as the second argument.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// C = op (A', y): transpose A and apply LOR with scalar y as 2nd argument.
GrB_Info GB (_bind2nd_tran__lor_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
OnDiscMSExperiment.h | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2016.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#ifndef OPENMS_KERNEL_ONDISCMSEXPERIMENT_H
#define OPENMS_KERNEL_ONDISCMSEXPERIMENT_H
#include <OpenMS/INTERFACES/DataStructures.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/MSChromatogram.h>
#include <OpenMS/METADATA/ExperimentalSettings.h>
#include <OpenMS/FORMAT/IndexedMzMLFile.h>
#include <OpenMS/FORMAT/MzMLFile.h>
#include <vector>
#include <algorithm>
#include <limits>
#include <boost/shared_ptr.hpp>
namespace OpenMS
{
/**
@brief Representation of a mass spectrometry experiment on disk.
@ingroup Kernel
@note This implementation is @a not thread-safe since it keeps internally a
single file access pointer which it moves when accessing a specific
data item. Please provide a separate copy to each thread, e.g.
@code
#pragma omp parallel for firstprivate(ondisc_map)
@endcode
*/
template <typename PeakT = Peak1D, typename ChromatogramPeakT = ChromatogramPeak>
class OnDiscMSExperiment
{
public:

  /**
    @brief Constructor

    This initializes the object; use openFile to open a file.
  */
  OnDiscMSExperiment() {}

  /**
    @brief Open a specific file on disk.

    This tries to read the indexed mzML by parsing the index and then reading
    the meta information into memory.

    @param filename The file to open
    @param skipMetaData If true, do not parse the per-spectrum meta data
           (meta_ms_experiment_ then stays unset)
    @return Whether the parsing of the file was successful (if false, the
            file most likely was not an indexed mzML file)
  */
  bool openFile(const String& filename, bool skipMetaData = false)
  {
    filename_ = filename;
    indexed_mzml_file_.openFile(filename);
    if (filename != "" && !skipMetaData)
    {
      loadMetaData_(filename);
    }
    return indexed_mzml_file_.getParsingSuccess();
  }

  /// Copy constructor (the meta experiment is shared, not deep-copied)
  OnDiscMSExperiment(const OnDiscMSExperiment& source) :
    filename_(source.filename_),
    indexed_mzml_file_(source.indexed_mzml_file_),
    meta_ms_experiment_(source.meta_ms_experiment_)
  {
  }

  /**
    @brief Equality operator

    This only checks whether the underlying file is the same and the parsed
    meta-information is the same. Note that the file reader (e.g. the
    std::ifstream of the file) might be in a different state.
  */
  bool operator==(const OnDiscMSExperiment& rhs) const
  {
    // check if file and meta information is the same
    return filename_ == rhs.filename_ &&
           (*meta_ms_experiment_) == (*rhs.meta_ms_experiment_);
    // do not check if indexed_mzml_file_ is equal -> they have the same filename...
  }

  /// Inequality operator
  bool operator!=(const OnDiscMSExperiment& rhs) const
  {
    return !(operator==(rhs));
  }

  /**
    @brief Checks if all spectra are sorted with respect to ascending RT

    Note that we cannot check whether all spectra are sorted (except if we
    were to load them all and check).
  */
  bool isSortedByRT() const
  {
    return meta_ms_experiment_->isSorted(false);
  }

  /// alias for getNrSpectra
  inline Size size() const
  {
    return getNrSpectra();
  }

  /// returns whether there are no spectra in the file
  inline bool empty() const
  {
    return indexed_mzml_file_.getNrSpectra() == 0;
  }

  /// get the total number of spectra available
  inline Size getNrSpectra() const
  {
    return indexed_mzml_file_.getNrSpectra();
  }

  /// get the total number of chromatograms available
  inline Size getNrChromatograms() const
  {
    return indexed_mzml_file_.getNrChromatograms();
  }

  /// returns the meta information of this experiment (const access)
  boost::shared_ptr<const ExperimentalSettings> getExperimentalSettings() const
  {
    return boost::static_pointer_cast<const ExperimentalSettings>(meta_ms_experiment_);
  }

  /// alias for getSpectrum
  inline MSSpectrum<PeakT> operator[](Size n)
  {
    return getSpectrum(n);
  }

  /**
    @brief returns a single spectrum

    Combines the meta data (from the in-memory meta experiment) with the
    peak data read on demand from disk.

    TODO: make this more efficient by reducing the copying
  */
  MSSpectrum<PeakT> getSpectrum(Size id)
  {
    OpenMS::Interfaces::SpectrumPtr sptr = indexed_mzml_file_.getSpectrumById(static_cast<int>(id));
    MSSpectrum<PeakT> spectrum(meta_ms_experiment_->operator[](id));
    // recreate a spectrum from the data arrays!
    OpenMS::Interfaces::BinaryDataArrayPtr mz_arr = sptr->getMZArray();
    OpenMS::Interfaces::BinaryDataArrayPtr int_arr = sptr->getIntensityArray();
    spectrum.reserve(mz_arr->data.size());
    for (Size i = 0; i < mz_arr->data.size(); i++)
    {
      PeakT p;
      p.setMZ(mz_arr->data[i]);
      p.setIntensity(int_arr->data[i]);
      spectrum.push_back(p);
    }
    return spectrum;
  }

  /**
    @brief returns a single spectrum (raw data interface, no meta data)
  */
  OpenMS::Interfaces::SpectrumPtr getSpectrumById(Size id)
  {
    return indexed_mzml_file_.getSpectrumById(id);
  }

  /**
    @brief returns a single chromatogram

    Combines the meta data (from the in-memory meta experiment) with the
    chromatogram data read on demand from disk.

    TODO: make this more efficient by reducing the copying
  */
  MSChromatogram<ChromatogramPeakT> getChromatogram(Size id)
  {
    OpenMS::Interfaces::ChromatogramPtr cptr = indexed_mzml_file_.getChromatogramById(static_cast<int>(id));
    MSChromatogram<ChromatogramPeakT> chromatogram(meta_ms_experiment_->getChromatogram(id));
    // recreate a chromatogram from the data arrays!
    OpenMS::Interfaces::BinaryDataArrayPtr rt_arr = cptr->getTimeArray();
    OpenMS::Interfaces::BinaryDataArrayPtr int_arr = cptr->getIntensityArray();
    chromatogram.reserve(rt_arr->data.size());
    for (Size i = 0; i < rt_arr->data.size(); i++)
    {
      ChromatogramPeakT p;
      p.setRT(rt_arr->data[i]);
      p.setIntensity(int_arr->data[i]);
      chromatogram.push_back(p);
    }
    return chromatogram;
  }

  /**
    @brief returns a single chromatogram (raw data interface, no meta data)
  */
  OpenMS::Interfaces::ChromatogramPtr getChromatogramById(Size id)
  {
    return indexed_mzml_file_.getChromatogramById(id);
  }

  /// sets whether to skip some XML checks and be fast instead
  void setSkipXMLChecks(bool skip)
  {
    indexed_mzml_file_.setSkipXMLChecks(skip);
  }

private:

  /// Private assignment operator -> we cannot copy file streams in
  /// IndexedMzMLFile. Deliberately private and never used; the body must
  /// still return *this so the definition is well-formed (the original had
  /// no return statement, which is undefined behavior if ever invoked).
  OnDiscMSExperiment& operator=(const OnDiscMSExperiment& /* source */)
  {
    return *this;
  }

  /// Load only the meta data of the experiment (peak arrays are skipped via
  /// PeakFileOptions::setFillData(false)) into meta_ms_experiment_.
  void loadMetaData_(const String& filename)
  {
    meta_ms_experiment_ = boost::shared_ptr< MSExperiment<> >(new MSExperiment<>);
    MzMLFile f;
    PeakFileOptions options = f.getOptions();
    options.setFillData(false);
    f.setOptions(options);
    f.load(filename, *meta_ms_experiment_.get());
  }

protected:

  /// The filename of the underlying data file
  String filename_;
  /// The index of the underlying data file
  IndexedMzMLFile indexed_mzml_file_;
  /// The meta-data (everything except the raw peak/chromatogram arrays)
  boost::shared_ptr<MSExperiment<> > meta_ms_experiment_;
};
} // namespace OpenMS
#endif // OPENMS_KERNEL_ONDISCMSEXPERIMENT_H
|
GB_selector.c | //------------------------------------------------------------------------------
// GB_selector: select entries from a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// GB_selector does the work for GB_select and the GxB_*select methods. It
// also deletes zombies for GB_wait using the NONZOMBIE operator, and deletes
// entries outside a smaller matrix for GxB_*resize.
// TODO: GB_selector does not exploit the mask.
// If C is NULL on input, A is modified in-place.
// Otherwise, C is an uninitialized static header.
#include "GB_select.h"
#include "GB_ek_slice.h"
#include "GB_sel__include.h"
#include "GB_scalar.h"
#include "GB_transpose.h"
// free all workspace allocated by GB_selector (safe to call with any of
// the pointers still NULL)
#define GB_FREE_WORKSPACE \
{ \
GB_FREE_WORK (&Zp, Zp_size) ; \
GB_WERK_POP (Work, int64_t) ; \
GB_WERK_POP (A_ek_slicing, int64_t) ; \
GB_FREE (&Cp, Cp_size) ; \
GB_FREE (&Ch, Ch_size) ; \
GB_FREE (&Ci, Ci_size) ; \
GB_FREE (&Cx, Cx_size) ; \
}
// free workspace and the output matrix C (used on the error paths)
#define GB_FREE_ALL \
{ \
GB_phbix_free (C) ; \
GB_FREE_WORKSPACE ; \
}
GrB_Info GB_selector
(
GrB_Matrix C, // output matrix, NULL or existing header
GB_Opcode opcode, // selector opcode
const GB_Operator op, // user operator, NULL for resize/nonzombie
const bool flipij, // if true, flip i and j for user operator
GrB_Matrix A, // input matrix
int64_t ithunk, // (int64_t) Thunk, if Thunk is NULL
const GrB_Scalar Thunk, // optional input for select operator
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT_OP_OK_OR_NULL (op, "selectop/idxunop for GB_selector", GB0) ;
ASSERT_SCALAR_OK_OR_NULL (Thunk, "Thunk for GB_selector", GB0) ;
ASSERT (GB_IS_SELECTOP_CODE (opcode) || GB_IS_INDEXUNARYOP_CODE (opcode)) ;
ASSERT_MATRIX_OK (A, "A input for GB_selector", GB_FLIP (GB0)) ;
// positional selector (tril, triu, diag, offdiag, resize, rowindex, ...):
// can't be jumbled. nonzombie, entry-valued op, user op: jumbled OK
ASSERT (GB_IMPLIES (GB_OPCODE_IS_POSITIONAL (opcode), !GB_JUMBLED (A))) ;
ASSERT (C == NULL || (C != NULL && (C->static_header || GBNSTATIC))) ;
//--------------------------------------------------------------------------
// declare workspace
//--------------------------------------------------------------------------
bool in_place_A = (C == NULL) ; // GrB_wait and GB_resize only
int64_t *restrict Zp = NULL ; size_t Zp_size = 0 ;
GB_WERK_DECLARE (Work, int64_t) ;
int64_t *restrict Wfirst = NULL ;
int64_t *restrict Wlast = NULL ;
int64_t *restrict Cp_kfirst = NULL ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
const bool A_iso = A->iso ;
int64_t *restrict Cp = NULL ; size_t Cp_size = 0 ;
int64_t *restrict Ch = NULL ; size_t Ch_size = 0 ;
int64_t *restrict Ci = NULL ; size_t Ci_size = 0 ;
GB_void *restrict Cx = NULL ; size_t Cx_size = 0 ;
//--------------------------------------------------------------------------
// get Thunk
//--------------------------------------------------------------------------
// The scalar value of Thunk has already been typecasted to an integer
// (int64_t ithunk).
// It is also now typecast to the same type as A (to the scalar athunk)
// which is required for GxB_SelectOps, and to the op->ytype (the scalar
// ythunk) for GrB_IndexUnaryOps.
// If Thunk is NULL, or has no entry, it is treated as a scalar value
// of zero.
const size_t asize = A->type->size ;
const GB_Type_code acode = A->type->code ;
GrB_Type ytype = NULL, xtype = NULL ;
GB_Type_code ycode = GB_ignore_code, xcode = GB_ignore_code ;
size_t ysize = 1, xsize = 1 ;
if (op != NULL)
{
if (op->ytype != NULL)
{
// get the type of the thunk input of the operator
ytype = op->ytype ;
ycode = ytype->code ;
ysize = ytype->size ;
}
if (op->xtype != NULL)
{
// get the type of the A input of the operator
xtype = op->xtype ;
xcode = xtype->code ;
xsize = xtype->size ;
}
}
// athunk = (A->type) Thunk, for selectop thunk comparators only
GB_void athunk [GB_VLA(asize)] ;
memset (athunk, 0, asize) ;
// ythunk = (op->ytype) Thunk, for idxnunop
GB_void ythunk [GB_VLA(ysize)] ;
memset (ythunk, 0, ysize) ;
bool op_is_selectop = GB_IS_SELECTOP_CODE (opcode) ;
bool op_is_idxunop = GB_IS_INDEXUNARYOP_CODE (opcode) ;
bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
if (Thunk != NULL)
{
// Thunk is passed to GB_selector only if it is non-empty
ASSERT (GB_nnz ((GrB_Matrix) Thunk) > 0) ;
const GB_Type_code tcode = Thunk->type->code ;
if (op_is_selectop && opcode != GB_USER_selop_code)
{
// athunk = (atype) Thunk, for built-in GxB_SelectOps only
GB_cast_scalar (athunk, acode, Thunk->x, tcode, asize) ;
}
if (ytype != NULL)
{
// ythunk = (op->ytype) Thunk
GB_cast_scalar (ythunk, ycode, Thunk->x, tcode, ysize) ;
}
}
//--------------------------------------------------------------------------
// handle iso case for built-in select ops that depend only on the value
//--------------------------------------------------------------------------
bool op_is_select_valued =
opcode >= GB_NONZERO_selop_code && opcode <= GB_LE_THUNK_selop_code ;
bool op_is_idxunop_valued =
opcode >= GB_VALUENE_idxunop_code && opcode <= GB_VALUELE_idxunop_code ;
if (A_iso && (op_is_select_valued || op_is_idxunop_valued))
{
// select op is NONZERO, EQ_ZERO, GT_ZERO, GE_ZERO, LT_ZERO, LE_ZERO,
// EQ_THUNK, GT_THUNK, GE_THUNK, LT_THUNK, or LE_THUNK, or the idxunop
// VALUE* operators. All of these select/idxunop ops depend only on
// the value of A(i,j). Since A is iso, either all entries in A will
// be copied to C and thus C can be created as a shallow copy of A, or
// no entries from A will be copied to C and thus C is an empty matrix.
// The select factory is not needed, except to check the iso value via
// GB_bitmap_selector.
ASSERT (!in_place_A) ;
ASSERT (C != NULL && (C->static_header || GBNSTATIC)) ;
// construct a scalar containing the iso scalar of A
// xscalar = (op->xtype) A->x for idxunops
GB_void xscalar [GB_VLA(xsize)] ;
memset (xscalar, 0, xsize) ;
struct GB_Scalar_opaque S_header ;
GrB_Scalar S ;
if (op_is_select_valued)
{
// wrap the iso-value of A in the scalar S, with no typecasting
S = GB_Scalar_wrap (&S_header, A->type, A->x) ;
}
else
{
// wrap the iso-value of A in the scalar S, typecasted to xtype
// xscalar = (op->xtype) A->x
GB_cast_scalar (xscalar, xcode, A->x, acode, asize) ;
S = GB_Scalar_wrap (&S_header, xtype, xscalar) ;
}
S->iso = false ; // but ensure S is not iso
ASSERT_SCALAR_OK (S, "iso scalar wrap", GB0) ;
// apply the select operator to the iso scalar S
GB_OK (GB_bitmap_selector (C, false, opcode, op, false,
(GrB_Matrix) S, ithunk, athunk, ythunk, Context)) ;
ASSERT_MATRIX_OK (C, "C from iso scalar test", GB0) ;
bool C_empty = (GB_nnz (C) == 0) ;
GB_phbix_free (C) ;
// check if C has 0 or 1 entry
if (C_empty)
{
// C is an empty matrix
return (GB_new (&C, // existing header
A->type, avlen, avdim, GB_Ap_calloc, true,
GxB_SPARSE + GxB_HYPERSPARSE, GB_Global_hyper_switch_get ( ),
1, Context)) ;
}
else
{
// C is a shallow copy of A with all the same entries as A
// set C->iso = A->iso OK
return (GB_shallow_copy (C, true, A, Context)) ;
}
}
// now if A is iso, the following operators still need to be handled:
// GB_TRIL_selop_code : use GB_sel__tril_iso
// GB_TRIU_selop_code : use GB_sel__triu_iso
// GB_DIAG_selop_code : use GB_sel__diag_iso
// GB_OFFDIAG_selop_code : use GB_sel__offdiag_iso
// GB_NONZOMBIE_selop_code : use GB_sel__nonzombie_iso
// GB_USER_selop_code : use GB_sel__user_iso
// GB_ROWINDEX_idxunop_code : use GB_sel__rowindex_iso
// GB_ROWLE_idxunop_code : use GB_sel__rowle_iso
// GB_ROWGT_idxunop_code : use GB_sel__rowle_iso
// all other idxunop : use GB_sel__idxunop_iso
// column selectors are handled below:
// GB_COLINDEX_idxunop_code :
// GB_COLLE_idxunop_code :
// GB_COLGT_idxunop_code :
// Except for GB_USER_selop_code and idxunop, the GB_sel__*_iso methods do
// not access the values of A and C, just the pattern.
//--------------------------------------------------------------------------
// handle the bitmap/as-if-full case
//--------------------------------------------------------------------------
bool use_bitmap_selector ;
if (opcode == GB_NONZOMBIE_selop_code || in_place_A)
{
// GB_bitmap_selector does not support the nonzombie opcode, nor does
// it support operating on A in place. For the NONZOMBIE operator, A
// will never be bitmap.
use_bitmap_selector = false ;
}
else if (opcode == GB_DIAG_selop_code)
{
// GB_bitmap_selector supports the DIAG operator, but it is currently
// not efficient (GB_bitmap_selector should return a sparse diagonal
// matrix, not bitmap). So use the sparse case if A is not bitmap,
// since the sparse case below does not support the bitmap case.
use_bitmap_selector = GB_IS_BITMAP (A) ;
}
else
{
// For bitmap, full, or as-if-full matrices (sparse/hypersparse with
// all entries present, not jumbled, no zombies, and no pending
// tuples), use the bitmap selector for all other operators (TRIL,
// TRIU, OFFDIAG, NONZERO, EQ*, GT*, GE*, LT*, LE*, and user-defined
// operators).
use_bitmap_selector = GB_IS_BITMAP (A) || GB_as_if_full (A) ;
}
//--------------------------------------------------------------------------
// determine if C is iso for a non-iso A
//--------------------------------------------------------------------------
bool C_iso = A_iso || // C iso value is Ax [0]
(opcode == GB_EQ_ZERO_selop_code) || // C iso value is zero
(opcode == GB_EQ_THUNK_selop_code) || // C iso value is thunk
(opcode == GB_NONZERO_selop_code &&
acode == GB_BOOL_code) ; // C iso value is true
if (C_iso)
{
GB_BURBLE_MATRIX (A, "(iso select) ") ;
}
//==========================================================================
// bitmap/full case
//==========================================================================
if (use_bitmap_selector)
{
GB_BURBLE_MATRIX (A, "(bitmap select) ") ;
ASSERT (C != NULL && (C->static_header || GBNSTATIC)) ;
return (GB_bitmap_selector (C, C_iso, opcode, op,
flipij, A, ithunk, athunk, ythunk, Context)) ;
}
//==========================================================================
// sparse/hypersparse case
//==========================================================================
//--------------------------------------------------------------------------
// determine the max number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// get A: sparse, hypersparse, or full
//--------------------------------------------------------------------------
// the case when A is bitmap is always handled above by GB_bitmap_selector
ASSERT (!GB_IS_BITMAP (A)) ;
int64_t *restrict Ap = A->p ; size_t Ap_size = A->p_size ;
int64_t *restrict Ah = A->h ;
int64_t *restrict Ai = A->i ; size_t Ai_size = A->i_size ;
GB_void *restrict Ax = (GB_void *) A->x ; size_t Ax_size = A->x_size ;
int64_t anvec = A->nvec ;
bool A_jumbled = A->jumbled ;
bool A_is_hyper = (Ah != NULL) ;
//==========================================================================
// column selector
//==========================================================================
// The column selectors can be done in a single pass.
if (opcode == GB_COLINDEX_idxunop_code ||
opcode == GB_COLLE_idxunop_code ||
opcode == GB_COLGT_idxunop_code)
{
//----------------------------------------------------------------------
// find column j in A
//----------------------------------------------------------------------
ASSERT_MATRIX_OK (A, "A for col selector", GB_FLIP (GB0)) ;
int nth = nthreads_max ;
ASSERT (!in_place_A) ;
ASSERT (C != NULL && (C->static_header || GBNSTATIC)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
int64_t j = (opcode == GB_COLINDEX_idxunop_code) ? (-ithunk) : ithunk ;
int64_t k = 0 ;
bool found ;
if (j < 0)
{
// j is outside the range of columns of A
k = 0 ;
found = false ;
}
else if (j >= avdim)
{
// j is outside the range of columns of A
k = anvec ;
found = false ;
}
else if (A_is_hyper)
{
// find the column j in the hyperlist of A
int64_t kright = anvec-1 ;
GB_SPLIT_BINARY_SEARCH (j, Ah, k, kright, found) ;
// if found is true the Ah [k] == j
// if found is false, then Ah [0..k-1] < j and Ah [k..anvec-1] > j
}
else
{
// j appears as the jth column in A; found is always true
k = j ;
found = true ;
}
//----------------------------------------------------------------------
// determine the # of entries and # of vectors in C
//----------------------------------------------------------------------
int64_t pstart = Ap [k] ;
int64_t pend = found ? Ap [k+1] : pstart ;
int64_t ajnz = pend - pstart ;
int64_t cnz, cnvec ;
int64_t anz = Ap [anvec] ;
if (opcode == GB_COLINDEX_idxunop_code)
{
// COLINDEX: delete column j: C = A (:, [0:j-1 j+1:end])
cnz = anz - ajnz ;
cnvec = (A_is_hyper && found) ? (anvec-1) : anvec ;
}
else if (opcode == GB_COLLE_idxunop_code)
{
// COLLE: C = A (:, 0:j)
cnz = pend ;
cnvec = (A_is_hyper) ? (found ? (k+1) : k) : anvec ;
}
else // (opcode == GB_COLGT_idxunop_code)
{
// COLGT: C = A (:, j+1:end)
cnz = anz - pend ;
cnvec = anvec - ((A_is_hyper) ? (found ? (k+1) : k) : 0) ;
}
if (cnz == anz)
{
// C is the same as A: return it a pure shallow copy
return (GB_shallow_copy (C, true, A, Context)) ;
}
else if (cnz == 0)
{
// return C as empty
return (GB_new (&C, // auto (sparse or hyper), existing header
A->type, avlen, avdim, GB_Ap_calloc, true,
GxB_HYPERSPARSE, GB_Global_hyper_switch_get ( ), 1, Context)) ;
}
//----------------------------------------------------------------------
// allocate C
//----------------------------------------------------------------------
int sparsity = (A_is_hyper) ? GxB_HYPERSPARSE : GxB_SPARSE ;
GB_OK (GB_new_bix (&C, // sparse or hyper (from A), existing header
A->type, avlen, avdim, GB_Ap_malloc, true, sparsity, false,
A->hyper_switch, cnvec, cnz, true, A_iso, Context)) ;
ASSERT (info == GrB_SUCCESS) ;
int nth2 = GB_nthreads (cnvec, chunk, nth) ;
int64_t *restrict Cp = C->p ;
int64_t *restrict Ch = C->h ;
int64_t *restrict Ci = C->i ;
GB_void *restrict Cx = (GB_void *) C->x ;
int64_t kk ;
//----------------------------------------------------------------------
// construct C
//----------------------------------------------------------------------
if (A_iso)
{
// Cx [0] = Ax [0]
memcpy (Cx, Ax, asize) ;
}
if (opcode == GB_COLINDEX_idxunop_code)
{
//------------------------------------------------------------------
// COLINDEX: delete the column j
//------------------------------------------------------------------
if (A_is_hyper)
{
ASSERT (found) ;
// Cp [0:k-1] = Ap [0:k-1]
GB_memcpy (Cp, Ap, k * sizeof (int64_t), nth) ;
// Cp [k:cnvec] = Ap [k+1:anvec] - ajnz
#pragma omp parallel for num_threads(nth2)
for (kk = k ; kk <= cnvec ; kk++)
{
Cp [kk] = Ap [kk+1] - ajnz ;
}
// Ch [0:k-1] = Ah [0:k-1]
GB_memcpy (Ch, Ah, k * sizeof (int64_t), nth) ;
// Ch [k:cnvec-1] = Ah [k+1:anvec-1]
GB_memcpy (Ch + k, Ah + (k+1), (cnvec-k) * sizeof (int64_t),
nth) ;
}
else
{
// Cp [0:k] = Ap [0:k]
GB_memcpy (Cp, Ap, (k+1) * sizeof (int64_t), nth) ;
// Cp [k+1:anvec] = Ap [k+1:anvec] - ajnz
#pragma omp parallel for num_threads(nth2)
for (kk = k+1 ; kk <= cnvec ; kk++)
{
Cp [kk] = Ap [kk] - ajnz ;
}
}
// Ci [0:pstart-1] = Ai [0:pstart-1]
GB_memcpy (Ci, Ai, pstart * sizeof (int64_t), nth) ;
// Ci [pstart:cnz-1] = Ai [pend:anz-1]
GB_memcpy (Ci + pstart, Ai + pend,
(cnz - pstart) * sizeof (int64_t), nth) ;
if (!A_iso)
{
// Cx [0:pstart-1] = Ax [0:pstart-1]
GB_memcpy (Cx, Ax, pstart * asize, nth) ;
// Cx [pstart:cnz-1] = Ax [pend:anz-1]
GB_memcpy (Cx + pstart * asize, Ax + pend * asize,
(cnz - pstart) * asize, nth) ;
}
}
else if (opcode == GB_COLLE_idxunop_code)
{
//------------------------------------------------------------------
// COLLE: C = A (:, 0:j)
//------------------------------------------------------------------
if (A_is_hyper)
{
// Cp [0:cnvec] = Ap [0:cnvec]
GB_memcpy (Cp, Ap, (cnvec+1) * sizeof (int64_t), nth) ;
// Ch [0:cnvec-1] = Ah [0:cnvec-1]
GB_memcpy (Ch, Ah, (cnvec) * sizeof (int64_t), nth) ;
}
else
{
// Cp [0:k+1] = Ap [0:k+1]
ASSERT (found) ;
GB_memcpy (Cp, Ap, (k+2) * sizeof (int64_t), nth) ;
// Cp [k+2:cnvec] = cnz
#pragma omp parallel for num_threads(nth2)
for (kk = k+2 ; kk <= cnvec ; kk++)
{
Cp [kk] = cnz ;
}
}
// Ci [0:cnz-1] = Ai [0:cnz-1]
GB_memcpy (Ci, Ai, cnz * sizeof (int64_t), nth) ;
if (!A_iso)
{
// Cx [0:cnz-1] = Ax [0:cnz-1]
GB_memcpy (Cx, Ax, cnz * asize, nth) ;
}
}
else // (opcode == GB_COLGT_idxunop_code)
{
//------------------------------------------------------------------
// COLGT: C = A (:, j+1:end)
//------------------------------------------------------------------
if (A_is_hyper)
{
// Cp [0:cnvec] = Ap [k+found:anvec] - pend
#pragma omp parallel for num_threads(nth2)
for (kk = 0 ; kk <= cnvec ; kk++)
{
Cp [kk] = Ap [kk + k + found] - pend ;
}
// Ch [0:cnvec-1] = Ah [k+found:anvec-1]
GB_memcpy (Ch, Ah + k + found, cnvec * sizeof (int64_t), nth) ;
}
else
{
ASSERT (found) ;
// Cp [0:k] = 0
GB_memset (Cp, 0, (k+1) * sizeof (int64_t), nth) ;
// Cp [k+1:cnvec] = Ap [k+1:cnvec] - pend
#pragma omp parallel for num_threads(nth2)
for (kk = k+1 ; kk <= cnvec ; kk++)
{
Cp [kk] = Ap [kk] - pend ;
}
}
// Ci [0:cnz-1] = Ai [pend:anz-1]
GB_memcpy (Ci, Ai + pend, cnz * sizeof (int64_t), nth) ;
if (!A_iso)
{
// Cx [0:cnz-1] = Ax [pend:anz-1]
GB_memcpy (Cx, Ax + pend * asize, cnz * asize, nth) ;
}
}
//----------------------------------------------------------------------
// finalize the matrix, free workspace, and return result
//----------------------------------------------------------------------
C->nvec = cnvec ;
C->magic = GB_MAGIC ;
C->jumbled = A_jumbled ; // C is jumbled if A is jumbled
C->iso = C_iso ; // OK: burble already done above
C->nvec_nonempty = GB_nvec_nonempty (C, Context) ;
ASSERT_MATRIX_OK (C, "C output for GB_selector (column select)", GB0) ;
return (GrB_SUCCESS) ;
}
//==========================================================================
// all other select/idxunop operators
//==========================================================================
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_phbix_free (C) ; \
GB_FREE_WORKSPACE ; \
}
//--------------------------------------------------------------------------
// allocate the new vector pointers of C
//--------------------------------------------------------------------------
int64_t cnz = 0 ;
Cp = GB_CALLOC (anvec+1, int64_t, &Cp_size) ;
if (Cp == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// slice the entries for each task
//--------------------------------------------------------------------------
int A_ntasks, A_nthreads ;
double work = 8*anvec
+ ((opcode == GB_DIAG_selop_code) ? 0 : GB_nnz_held (A)) ;
GB_SLICE_MATRIX_WORK (A, 8, chunk, work) ;
//--------------------------------------------------------------------------
// allocate workspace for each task
//--------------------------------------------------------------------------
GB_WERK_PUSH (Work, 3*A_ntasks, int64_t) ;
if (Work == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
Wfirst = Work ;
Wlast = Work + A_ntasks ;
Cp_kfirst = Work + A_ntasks * 2 ;
//--------------------------------------------------------------------------
// allocate workspace for phase1
//--------------------------------------------------------------------------
// phase1 counts the number of live entries in each vector of A. The
// result is computed in Cp, where Cp [k] is the number of live entries in
// the kth vector of A. Zp [k] is the location of the A(i,k) entry, for
// positional operators.
if (op_is_positional)
{
// allocate Zp
Zp = GB_MALLOC_WORK (anvec, int64_t, &Zp_size) ;
if (Zp == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// phase1: count the live entries in each column
//--------------------------------------------------------------------------
// define the worker for the switch factory
#define GB_SELECT_PHASE1
#define GB_sel1(opname,aname) GB (_sel_phase1_ ## opname ## aname)
#define GB_SEL_WORKER(opname,aname,atype) \
{ \
GB_sel1 (opname, aname) (Zp, Cp, Wfirst, Wlast, A, \
flipij, ithunk, (atype *) athunk, ythunk, op, \
A_ek_slicing, A_ntasks, A_nthreads) ; \
} \
break ;
// launch the switch factory
const GB_Type_code typecode = (A_iso) ? GB_ignore_code : acode ;
#include "GB_select_factory.c"
#undef GB_SELECT_PHASE1
#undef GB_SEL_WORKER
//--------------------------------------------------------------------------
// cumulative sum of Cp and compute Cp_kfirst
//--------------------------------------------------------------------------
int64_t C_nvec_nonempty ;
GB_ek_slice_merge2 (&C_nvec_nonempty, Cp_kfirst, Cp, anvec,
Wfirst, Wlast, A_ek_slicing, A_ntasks, A_nthreads, Context) ;
//--------------------------------------------------------------------------
// allocate new space for the compacted Ci and Cx
//--------------------------------------------------------------------------
cnz = Cp [anvec] ;
cnz = GB_IMAX (cnz, 1) ;
Ci = GB_MALLOC (cnz, int64_t, &Ci_size) ;
// use calloc since C is sparse, not bitmap
Cx = (GB_void *) GB_XALLOC (false, C_iso, cnz, asize, &Cx_size) ; // x:OK
if (Ci == NULL || Cx == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// set the iso value of C
//--------------------------------------------------------------------------
if (C_iso)
{
// The pattern of C is computed by the worker below, for the DIAG,
// OFFDIAG, TRIL, TRIU, NONZOMBIE, and USER select operators.
GB_iso_select (Cx, opcode, athunk, Ax, acode, asize) ;
}
//--------------------------------------------------------------------------
// phase2: select the entries
//--------------------------------------------------------------------------
// define the worker for the switch factory
#define GB_SELECT_PHASE2
#define GB_sel2(opname,aname) GB (_sel_phase2_ ## opname ## aname)
#define GB_SEL_WORKER(opname,aname,atype) \
{ \
GB_sel2 (opname, aname) (Ci, (atype *) Cx, Zp, Cp, Cp_kfirst, A, \
flipij, ithunk, (atype *) athunk, ythunk, op, \
A_ek_slicing, A_ntasks, A_nthreads) ; \
} \
break ;
// launch the switch factory
#include "GB_select_factory.c"
//--------------------------------------------------------------------------
// create the result
//--------------------------------------------------------------------------
if (in_place_A)
{
//----------------------------------------------------------------------
// transplant Cp, Ci, Cx back into A
//----------------------------------------------------------------------
// TODO: this is not parallel: use GB_hyper_prune
if (A->h != NULL && C_nvec_nonempty < anvec)
{
// prune empty vectors from Ah and Ap
int64_t cnvec = 0 ;
for (int64_t k = 0 ; k < anvec ; k++)
{
if (Cp [k] < Cp [k+1])
{
Ah [cnvec] = Ah [k] ;
Ap [cnvec] = Cp [k] ;
cnvec++ ;
}
}
Ap [cnvec] = Cp [anvec] ;
A->nvec = cnvec ;
ASSERT (A->nvec == C_nvec_nonempty) ;
GB_FREE (&Cp, Cp_size) ;
}
else
{
// free the old A->p and transplant in Cp as the new A->p
GB_FREE (&Ap, Ap_size) ;
A->p = Cp ; Cp = NULL ; A->p_size = Cp_size ;
A->plen = anvec ;
}
ASSERT (Cp == NULL) ;
GB_FREE (&Ai, Ai_size) ;
GB_FREE (&Ax, Ax_size) ;
A->i = Ci ; Ci = NULL ; A->i_size = Ci_size ;
A->x = Cx ; Cx = NULL ; A->x_size = Cx_size ;
A->nvec_nonempty = C_nvec_nonempty ;
A->jumbled = A_jumbled ; // A remains jumbled (in-place select)
A->iso = C_iso ; // OK: burble already done above
// the NONZOMBIE opcode may have removed all zombies, but A->nzombie
// is still nonzero. It is set to zero in GB_wait.
ASSERT_MATRIX_OK (A, "A output for GB_selector", GB_FLIP (GB0)) ;
}
else
{
//----------------------------------------------------------------------
// create C and transplant Cp, Ch, Ci, Cx into C
//----------------------------------------------------------------------
int sparsity = (A_is_hyper) ? GxB_HYPERSPARSE : GxB_SPARSE ;
ASSERT (C != NULL && (C->static_header || GBNSTATIC)) ;
info = GB_new (&C, // sparse or hyper (from A), existing header
A->type, avlen, avdim, GB_Ap_null, true,
sparsity, A->hyper_switch, anvec, Context) ;
ASSERT (info == GrB_SUCCESS) ;
if (A->h != NULL)
{
//------------------------------------------------------------------
// A and C are hypersparse: copy non-empty vectors from Ah to Ch
//------------------------------------------------------------------
Ch = GB_MALLOC (anvec, int64_t, &Ch_size) ;
if (Ch == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// TODO: do in parallel: use GB_hyper_prune
int64_t cnvec = 0 ;
for (int64_t k = 0 ; k < anvec ; k++)
{
if (Cp [k] < Cp [k+1])
{
Ch [cnvec] = Ah [k] ;
Cp [cnvec] = Cp [k] ;
cnvec++ ;
}
}
Cp [cnvec] = Cp [anvec] ;
C->nvec = cnvec ;
ASSERT (C->nvec == C_nvec_nonempty) ;
}
C->p = Cp ; Cp = NULL ; C->p_size = Cp_size ;
C->h = Ch ; Ch = NULL ; C->h_size = Ch_size ;
C->i = Ci ; Ci = NULL ; C->i_size = Ci_size ;
C->x = Cx ; Cx = NULL ; C->x_size = Cx_size ;
C->plen = anvec ;
C->magic = GB_MAGIC ;
C->nvec_nonempty = C_nvec_nonempty ;
C->jumbled = A_jumbled ; // C is jumbled if A is jumbled
C->iso = C_iso ; // OK: burble already done above
ASSERT_MATRIX_OK (C, "C output for GB_selector", GB0) ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
}
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[])
{
  /* Purpose: Sum the integers 0..N-1 in parallel using an OpenMP reduction.
     Usage: pragmatest N
     Returns EXIT_SUCCESS on success, EXIT_FAILURE on missing argument.
     NB: sum is accumulated in single precision, so large N loses accuracy;
     kept as float to preserve the program's original output format. */
  if(argc < 2){
    /* Guard: original code dereferenced argv[1] unconditionally (UB when
       the program is run with no argument) */
    fprintf(stderr, "usage: %s N\n", argv[0]);
    return EXIT_FAILURE;
  } /* !argc */
  int N = atoi(argv[1]);
  float sum = 0;
  /* Each thread keeps a private partial sum; OpenMP combines them at the end */
#pragma omp parallel for reduction( +:sum)
  for (int n=0; n<N;n++)
  {
    sum = sum + n;
  }
  printf("sum = %f \n",sum);
  return EXIT_SUCCESS;
}
/* $Header$ */
/* Purpose: NCO regridding utilities */
/* Copyright (C) 2015--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License with exceptions described in the LICENSE file */
#include "nco_rgr.h" /* Regridding */
/* Extern declarations give the inline definitions below an external
   definition in this translation unit (C99 6.7.4) */
extern double min_dbl(double a, double b);
extern double max_dbl(double a, double b);
/* Return the lesser of two doubles (returns b on ties and when a is NaN,
   matching the a<b comparison) */
inline double min_dbl(double a, double b){if(a < b) return a; return b;}
/* Return the greater of two doubles (returns b on ties and when a is NaN,
   matching the a>b comparison) */
inline double max_dbl(double a, double b){if(a > b) return a; return b;}
int /* O [enm] Return code */
nco_rgr_ctl /* [fnc] Control regridding logic */
(rgr_sct * const rgr, /* I/O [sct] Regridding structure */
 trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */
{
  /* Purpose: Control regridding logic */
  int rcd=NCO_NOERR;
  const char fnc_nm[]="nco_rgr_ctl()";

  /* Main control branching occurs here
     Branching complexity and utility will increase as regridding features are added
     Each requested operation is derived directly from the regridding structure */
  const nco_bool flg_grd=rgr->flg_grd ? True : False; /* [flg] Create SCRIP-format grid file */
  const nco_bool flg_map=(rgr->flg_grd_src && rgr->flg_grd_dst && rgr->flg_wgt) ? True : False; /* [flg] Create ESMF-format mapfile */
  const nco_bool flg_nfr=rgr->flg_nfr ? True : False; /* [flg] Infer SCRIP-format grid file */
  const nco_bool flg_smf=False; /* [flg] ESMF regridding (unused) */
  const nco_bool flg_s1d=rgr->flg_s1d ? True : False; /* [flg] Unpack sparse-1D CLM/ELM variables */
  const nco_bool flg_tps=False; /* [flg] Tempest regridding (unused) */
  const nco_bool flg_vrt=rgr->fl_vrt ? True : False; /* [flg] Interpolate to new vertical grid */
  const nco_bool flg_wgt=(rgr->flg_wgt && !(rgr->flg_grd_src && rgr->flg_grd_dst)) ? True : False; /* [flg] Regrid with external weights */

  assert(!flg_smf);
  assert(!flg_tps);

  /* Create SCRIP-format grid file */
  if(flg_grd) rcd=nco_grd_mk(rgr);

  /* Create ESMF-format map file */
  if(flg_map) rcd=nco_map_mk(rgr);

  /* Infer SCRIP-format grid file from data file */
  if(flg_nfr) rcd=nco_grd_nfr(rgr);

  /* Interpolate data file to new vertical grid */
  if(flg_vrt) rcd=nco_ntp_vrt(rgr,trv_tbl);

  /* Unpack sparse-1D CLM/ELM variables into full file */
  if(flg_s1d) rcd=nco_s1d_unpack(rgr,trv_tbl);

  /* Regrid data horizontally using weights from mapping file */
  if(flg_wgt) rcd=nco_rgr_wgt(rgr,trv_tbl);

  /* Regrid using ESMF library
     20150701: On-line weight generation with ESMF never worked well and was abandoned */
  if(flg_smf){
#ifdef ENABLE_ESMF
    (void)fprintf(stderr,"%s: %s calling nco_rgr_esmf() to generate and apply regridding map\n",nco_prg_nm_get(),fnc_nm);
    rcd=nco_rgr_esmf(rgr);
    /* Close output and free dynamic memory */
    (void)nco_fl_out_cls(rgr->fl_out,rgr->fl_out_tmp,rgr->out_id);
#else /* !ENABLE_ESMF */
    (void)fprintf(stderr,"%s: ERROR %s reports attempt to use ESMF regridding without built-in support. Re-configure with --enable_esmf.\n",nco_prg_nm_get(),fnc_nm);
    nco_exit(EXIT_FAILURE);
#endif /* !ENABLE_ESMF */
  } /* !flg_smf */

  /* Regrid using TempestRemap regridding
     20180314: Weight generation with Tempest is implemented off-line via ncremap, not internally on-line
     However, do not deprecate this since TempestRemap2 has a library that could be accessed on-line */
  if(flg_tps) rcd=nco_rgr_tps(rgr);

  return rcd;
} /* end nco_rgr_ctl() */
rgr_sct * /* O [sct] Pointer to free'd regridding structure */
nco_rgr_free /* [fnc] Deallocate regridding structure */
(rgr_sct *rgr) /* I/O [sct] Regridding structure */
{
  /* Purpose: Free all dynamic memory in regridding structure
     Returns NULL so callers can write rgr=nco_rgr_free(rgr) */

  /* Guard: every statement below dereferences rgr, yet the final free was
     guarded by if(rgr), implying NULL is a legal input. Return early so a
     NULL argument no longer dereferences a NULL pointer. */
  if(!rgr) return rgr;

  /* free() standalone command-line arguments */
  if(rgr->cmd_ln) rgr->cmd_ln=(char *)nco_free(rgr->cmd_ln);
  if(rgr->grd_ttl) rgr->grd_ttl=(char *)nco_free(rgr->grd_ttl);
  if(rgr->fl_grd_src) rgr->fl_grd_src=(char *)nco_free(rgr->fl_grd_src);
  if(rgr->fl_grd_dst) rgr->fl_grd_dst=(char *)nco_free(rgr->fl_grd_dst);
  if(rgr->fl_hrz) rgr->fl_hrz=(char *)nco_free(rgr->fl_hrz);
  if(rgr->fl_in) rgr->fl_in=(char *)nco_free(rgr->fl_in);
  if(rgr->fl_map) rgr->fl_map=(char *)nco_free(rgr->fl_map);
  if(rgr->fl_msh) rgr->fl_msh=(char *)nco_free(rgr->fl_msh);
  if(rgr->fl_out) rgr->fl_out=(char *)nco_free(rgr->fl_out);
  if(rgr->fl_out_tmp) rgr->fl_out_tmp=(char *)nco_free(rgr->fl_out_tmp);
  if(rgr->fl_vrt) rgr->fl_vrt=(char *)nco_free(rgr->fl_vrt);
  if(rgr->var_nm) rgr->var_nm=(char *)nco_free(rgr->var_nm);
  if(rgr->xtn_var) rgr->xtn_var=(char **)nco_sng_lst_free(rgr->xtn_var,rgr->xtn_nbr);

  /* free() strings associated with grid properties */
  if(rgr->fl_grd) rgr->fl_grd=(char *)nco_free(rgr->fl_grd);
  if(rgr->fl_hnt_dst) rgr->fl_hnt_dst=(char *)nco_free(rgr->fl_hnt_dst);
  if(rgr->fl_hnt_src) rgr->fl_hnt_src=(char *)nco_free(rgr->fl_hnt_src);
  if(rgr->fl_skl) rgr->fl_skl=(char *)nco_free(rgr->fl_skl);
  if(rgr->fl_ugrid) rgr->fl_ugrid=(char *)nco_free(rgr->fl_ugrid);

  /* Tempest */
  if(rgr->drc_tps) rgr->drc_tps=(char *)nco_free(rgr->drc_tps);

  /* free() memory used to construct KVMs */
  if(rgr->rgr_nbr > 0) rgr->rgr_arg=nco_sng_lst_free(rgr->rgr_arg,rgr->rgr_nbr);

  /* free() memory copied from KVMs */
  if(rgr->area_nm) rgr->area_nm=(char *)nco_free(rgr->area_nm);
  if(rgr->bnd_nm) rgr->bnd_nm=(char *)nco_free(rgr->bnd_nm);
  if(rgr->bnd_tm_nm) rgr->bnd_tm_nm=(char *)nco_free(rgr->bnd_tm_nm);
  if(rgr->col_nm_in) rgr->col_nm_in=(char *)nco_free(rgr->col_nm_in);
  if(rgr->col_nm_out) rgr->col_nm_out=(char *)nco_free(rgr->col_nm_out);
  if(rgr->frc_nm) rgr->frc_nm=(char *)nco_free(rgr->frc_nm);
  if(rgr->ilev_nm_in) rgr->ilev_nm_in=(char *)nco_free(rgr->ilev_nm_in);
  if(rgr->ilev_nm_out) rgr->ilev_nm_out=(char *)nco_free(rgr->ilev_nm_out);
  if(rgr->lat_bnd_nm) rgr->lat_bnd_nm=(char *)nco_free(rgr->lat_bnd_nm);
  if(rgr->lat_nm_in) rgr->lat_nm_in=(char *)nco_free(rgr->lat_nm_in);
  if(rgr->lat_nm_out) rgr->lat_nm_out=(char *)nco_free(rgr->lat_nm_out);
  if(rgr->lat_vrt_nm) rgr->lat_vrt_nm=(char *)nco_free(rgr->lat_vrt_nm);
  if(rgr->lat_wgt_nm) rgr->lat_wgt_nm=(char *)nco_free(rgr->lat_wgt_nm);
  if(rgr->lev_nm_in) rgr->lev_nm_in=(char *)nco_free(rgr->lev_nm_in);
  if(rgr->lev_nm_out) rgr->lev_nm_out=(char *)nco_free(rgr->lev_nm_out);
  if(rgr->lon_bnd_nm) rgr->lon_bnd_nm=(char *)nco_free(rgr->lon_bnd_nm);
  if(rgr->lon_nm_in) rgr->lon_nm_in=(char *)nco_free(rgr->lon_nm_in);
  if(rgr->lon_nm_out) rgr->lon_nm_out=(char *)nco_free(rgr->lon_nm_out);
  if(rgr->lon_vrt_nm) rgr->lon_vrt_nm=(char *)nco_free(rgr->lon_vrt_nm);
  if(rgr->msk_nm) rgr->msk_nm=(char *)nco_free(rgr->msk_nm);
  if(rgr->plev_nm_in) rgr->plev_nm_in=(char *)nco_free(rgr->plev_nm_in);
  if(rgr->vrt_nm) rgr->vrt_nm=(char *)nco_free(rgr->vrt_nm);

  /* Lastly, free() regrid structure itself (guard kept for minimal diff;
     rgr is non-NULL here) */
  if(rgr) rgr=(rgr_sct *)nco_free(rgr);

  return rgr;
} /* end nco_rgr_free() */
rgr_sct * /* O [sct] Regridding structure */
nco_rgr_ini /* [fnc] Initialize regridding structure */
(const char * const cmd_ln, /* I [sng] Command-line */
const int in_id, /* I [id] Input netCDF file ID */
char **rgr_arg, /* [sng] Regridding arguments */
const int rgr_arg_nbr, /* [nbr] Number of regridding arguments */
char * const rgr_in, /* I [sng] File containing fields to be regridded */
char * const rgr_out, /* I [sng] File containing regridded fields */
char * const rgr_grd_src, /* I [sng] File containing input grid */
char * const rgr_grd_dst, /* I [sng] File containing destination grid */
char * const rgr_hrz, /* I [sng] File containing horizontal coordinate grid */
char * const rgr_map, /* I [sng] File containing mapping weights from source to destination grid */
char * const rgr_var, /* I [sng] Variable for special regridding treatment */
char * const rgr_vrt, /* I [sng] File containing vertical coordinate grid */
const double wgt_vld_thr, /* I [frc] Weight threshold for valid destination value */
char **xtn_var, /* [sng] I Extensive variables */
const int xtn_nbr) /* [nbr] I Number of extensive variables */
{
/* Purpose: Initialize regridding structure */
const char fnc_nm[]="nco_rgr_ini()";
rgr_sct *rgr;
/* Allocate */
rgr=(rgr_sct *)nco_malloc(sizeof(rgr_sct));
/* Initialize variables directly or indirectly set via command-line (except for key-value arguments) */
rgr->cmd_ln=strdup(cmd_ln); /* [sng] Command-line */
rgr->flg_usr_rqs=False; /* [flg] User requested regridding */
rgr->out_id=int_CEWI; /* [id] Output netCDF file ID */
rgr->in_id=in_id; /* [id] Input netCDF file ID */
rgr->rgr_arg=rgr_arg; /* [sng] Regridding arguments */
rgr->rgr_nbr=rgr_arg_nbr; /* [nbr] Number of regridding arguments */
rgr->drc_tps=NULL; /* [sng] Directory where Tempest grids, meshes, and weights are stored */
rgr->flg_grd_src= rgr_grd_src ? True : False; /* [flg] User-specified input grid */
rgr->fl_grd_src=rgr_grd_src; /* [sng] File containing input grid */
rgr->flg_grd_dst= rgr_grd_dst ? True : False; /* [flg] User-specified destination grid */
rgr->fl_grd_dst=rgr_grd_dst; /* [sng] File containing destination grid */
rgr->fl_in=rgr_in; /* [sng] File containing fields to be regridded */
rgr->fl_out=rgr_out; /* [sng] File containing regridded fields */
rgr->fl_out_tmp=NULL_CEWI; /* [sng] Temporary file containing regridded fields */
rgr->flg_wgt= rgr_map ? True : False; /* [flg] User-specified mapping weights */
rgr->fl_map=rgr_map; /* [sng] File containing mapping weights from source to destination grid */
rgr->fl_hrz=rgr_hrz; /* [sng] [sng] File containing horizontal coordinate grid (for S1D) */
rgr->fl_vrt=rgr_vrt; /* [sng] [sng] File containing vertical coordinate grid */
rgr->var_nm=rgr_var; /* [sng] Variable for special regridding treatment */
rgr->xtn_var=xtn_var; /* [sng] Extensive variables */
rgr->xtn_nbr=xtn_nbr; /* [nbr] Number of extensive variables */
/* Did user explicitly request regridding? */
if(rgr_arg_nbr > 0 || rgr_grd_src != NULL || rgr_grd_dst != NULL || rgr_map != NULL || rgr_vrt != NULL) rgr->flg_usr_rqs=True;
/* Initialize arguments after copying */
if(!rgr->fl_out) rgr->fl_out=(char *)strdup("/data/zender/rgr/rgr_out.nc");
if(!rgr->fl_grd_dst) rgr->fl_grd_dst=(char *)strdup("/data/zender/scrip/grids/remap_grid_T42.nc");
// if(!rgr->var_nm) rgr->var_nm=(char *)strdup("ORO");
if(nco_dbg_lvl_get() >= nco_dbg_crr){
(void)fprintf(stderr,"%s: INFO %s reports ",nco_prg_nm_get(),fnc_nm);
(void)fprintf(stderr,"flg_usr_rqs = %d, ",rgr->flg_usr_rqs);
(void)fprintf(stderr,"rgr_nbr = %d, ",rgr->rgr_nbr);
(void)fprintf(stderr,"fl_grd_src = %s, ",rgr->fl_grd_src ? rgr->fl_grd_src : "NULL");
(void)fprintf(stderr,"fl_grd_dst = %s, ",rgr->fl_grd_dst ? rgr->fl_grd_dst : "NULL");
(void)fprintf(stderr,"fl_hrz = %s, ",rgr->fl_hrz ? rgr->fl_hrz : "NULL");
(void)fprintf(stderr,"fl_in = %s, ",rgr->fl_in ? rgr->fl_in : "NULL");
(void)fprintf(stderr,"fl_out = %s, ",rgr->fl_out ? rgr->fl_out : "NULL");
(void)fprintf(stderr,"fl_out_tmp = %s, ",rgr->fl_out_tmp ? rgr->fl_out_tmp : "NULL");
(void)fprintf(stderr,"fl_map = %s, ",rgr->fl_map ? rgr->fl_map : "NULL");
(void)fprintf(stderr,"fl_vrt = %s, ",rgr->fl_vrt ? rgr->fl_vrt : "NULL");
(void)fprintf(stderr,"\n");
} /* endif dbg */
/* Flags */
if(wgt_vld_thr == NC_MIN_DOUBLE){
rgr->flg_rnr=False;
}else if(wgt_vld_thr >= 0.0 && wgt_vld_thr <= 1.0){
/* NB: Weight thresholds of 0.0 or nearly zero can lead to underflow or divide-by-zero errors */
// const double wgt_vld_thr_min=1.0e-10; /* [frc] Minimum weight threshold for valid destination value */
rgr->flg_rnr=True;
rgr->wgt_vld_thr=wgt_vld_thr;
}else{
(void)fprintf(stderr,"%s: ERROR weight threshold must be in [0.0,1.0] and user supplied wgt_vld_thr = %g\n",nco_prg_nm_get(),wgt_vld_thr);
nco_exit(EXIT_FAILURE);
} /* endif */
/* Parse extended kvm options */
char *sng_fnl=NULL;
int cnv_nbr; /* [nbr] Number of elements converted by sscanf() */
int rgr_var_idx; /* [idx] Index over rgr_lst (i.e., all names explicitly specified in all "--rgr var1[,var2]=val" options) */
int rgr_var_nbr=0;
kvm_sct *rgr_lst=NULL; /* [sct] List of all regrid specifications */
if(rgr_arg_nbr > 0){
/* Join arguments together */
sng_fnl=nco_join_sng(rgr_arg,rgr_arg_nbr);
rgr_lst=nco_arg_mlt_prs(sng_fnl);
if(sng_fnl) sng_fnl=(char *)nco_free(sng_fnl);
/* Count number of keys */
for(rgr_var_idx=0;(rgr_lst+rgr_var_idx)->key;rgr_var_idx++,rgr_var_nbr++);/* !rgr_var_idx */
} /* !rgr_arg_nbr */
/* NULL-initialize key-value properties required for string variables */
rgr->area_nm=NULL; /* [sng] Name of variable containing gridcell area */
rgr->bnd_nm=NULL; /* [sng] Name of dimension to employ for spatial bounds */
rgr->bnd_tm_nm=NULL; /* [sng] Name of dimension to employ for temporal bounds */
rgr->col_nm_in=NULL; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */
rgr->col_nm_out=NULL; /* [sng] Name of horizontal spatial output dimension on unstructured grid */
rgr->frc_nm=NULL; /* [sng] Name of variable containing gridcell fraction */
rgr->ilev_nm_in=NULL; /* [sng] Name of input dimension to recognize as vertical dimension at layer interfaces */
rgr->ilev_nm_out=NULL; /* [sng] Name of output vertical dimension at layer interfaces */
rgr->lat_bnd_nm=NULL; /* [sng] Name of rectangular boundary variable for latitude */
rgr->lat_dmn_nm=NULL; /* [sng] Name of latitude dimension in inferred grid */
rgr->lat_nm_in=NULL; /* [sng] Name of input dimension to recognize as latitude */
rgr->lat_nm_out=NULL; /* [sng] Name of output dimension for latitude */
rgr->lat_vrt_nm=NULL; /* [sng] Name of non-rectangular boundary variable for latitude */
rgr->lat_wgt_nm=NULL; /* [sng] Name of variable containing latitude weights */
rgr->lev_nm_in=NULL; /* [sng] Name of input dimension to recognize as vertical dimension at layer midpoints */
rgr->lev_nm_out=NULL; /* [sng] Name of output vertical dimension at layer midpoints */
rgr->lon_bnd_nm=NULL; /* [sng] Name of rectangular boundary variable for longitude */
rgr->lon_dmn_nm=NULL; /* [sng] Name of longitude dimension in inferred grid */
rgr->lon_nm_in=NULL; /* [sng] Name of dimension to recognize as longitude */
rgr->lon_nm_out=NULL; /* [sng] Name of output dimension for longitude */
rgr->lon_vrt_nm=NULL; /* [sng] Name of non-rectangular boundary variable for longitude */
rgr->msk_nm=NULL; /* [sng] Name of variable containing destination mask */
rgr->plev_nm_in=NULL; /* [sng] Name of input variable recognize as pure-pressure coordinate */
rgr->sgs_frc_nm=NULL; /* [sng] Name of variable sub-gridscale fraction */
rgr->sgs_msk_nm=NULL; /* [sng] Name of variable sub-gridscale mask */
rgr->vrt_nm=NULL; /* [sng] Name of dimension to employ for vertices */
/* Initialize key-value properties used in grid and weight generation */
rgr->area_mth=1; /* [enm] Method to compute grid cell area */
rgr->edg_typ=nco_edg_nil; /* [enm] Edge/Arc-type for triangle edges */
rgr->fl_grd=NULL; /* [sng] Name of SCRIP grid file to create */
rgr->fl_hnt_dst=NULL; /* [sng] ERWG hint destination */
rgr->fl_hnt_src=NULL; /* [sng] ERWG hint source */
rgr->fl_msh=NULL; /* [sng] Name of SCRIP intersection mesh file to create */
rgr->fl_skl=NULL; /* [sng] Name of skeleton data file to create */
rgr->fl_ugrid=NULL; /* [sng] Name of UGRID grid file to create */
rgr->flg_add_fll=False; /* [flg] Add _FillValue to fields with empty destination cells */
rgr->flg_area_out=True; /* [flg] Add area to output */
rgr->flg_cf_units=False; /* [flg] Generate CF-compliant (breaks ERWG 7.1.0r-) units fields in SCRIP-format grid files */
rgr->flg_cll_msr=True; /* [flg] Add cell_measures attribute */
rgr->flg_crv=False; /* [flg] Use curvilinear coordinates */
rgr->flg_dgn_area=False; /* [flg] Diagnose rather than copy inferred area */
rgr->flg_dgn_bnd=False; /* [flg] Diagnose rather than copy inferred bounds */
rgr->flg_erwg_units=True; /* [flg] Generate ERWG 7.1.0r-compliant SCRIP-format grid files */
rgr->flg_grd=False; /* [flg] Create SCRIP-format grid file */
rgr->flg_msk_apl=False; /* [flg] Apply msk_out to variables after regridding */
rgr->flg_msk_out=False; /* [flg] Add mask to output */
rgr->flg_nfr=False; /* [flg] Infer SCRIP-format grid file */
rgr->flg_s1d=False; /* [flg] Unpack sparse-1D CLM/ELM variables */
rgr->flg_stg=True; /* [flg] Write staggered grid with FV output */
rgr->grd_ttl=strdup("None given (supply with --rgr grd_ttl=\"Grid Title\")"); /* [enm] Grid title */
rgr->grd_typ=nco_grd_2D_eqa; /* [enm] Grid type */
rgr->idx_dbg=0; /* [idx] Index of gridcell for debugging */
rgr->lat_drc=nco_grd_lat_drc_s2n; /* [enm] Latitude grid direction */
rgr->lat_typ=nco_grd_lat_eqa; /* [enm] Latitude grid type */
rgr->lon_typ=nco_grd_lon_Grn_ctr; /* [enm] Longitude grid type */
rgr->lat_nbr=180; /* [nbr] Number of latitudes in destination grid */
rgr->lon_nbr=360; /* [nbr] Number of longitudes in destination grid */
rgr->lat_crv=0.0; /* [dgr] Latitudinal curvilinearity */
rgr->lon_crv=0.0; /* [dgr] Longitudinal curvilinearity */
rgr->lat_sth=NC_MAX_DOUBLE; /* [dgr] Latitude of southern edge of grid */
rgr->lon_wst=NC_MAX_DOUBLE; /* [dgr] Longitude of western edge of grid */
rgr->lat_nrt=NC_MAX_DOUBLE; /* [dgr] Latitude of northern edge of grid */
rgr->lon_est=NC_MAX_DOUBLE; /* [dgr] Longitude of eastern edge of grid */
rgr->msk_var=NULL; /* [sng] Mask-template variable */
rgr->ply_tri_mth=nco_ply_tri_mth_csz; /* [enm] Polygon-to-triangle decomposition method */
rgr->sgs_nrm=1.0; /* [sng] Sub-gridscale normalization */
rgr->tst=0L; /* [enm] Generic key for testing (undocumented) */
rgr->ntp_mth=nco_ntp_log; /* [enm] Interpolation method */
rgr->xtr_mth=nco_xtr_fll_ngh; /* [enm] Extrapolation method */
rgr->xtr_nsp=8; /* [sng] Extrapolation number of source points */
rgr->xtr_xpn=2.0; /* [sng] Exponent of distance in extrapolation (absolute value) */
rgr->wgt_typ=nco_wgt_con; /* [enm] Weight generation method */
/* Parse key-value properties */
char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
for(rgr_var_idx=0;rgr_var_idx<rgr_var_nbr;rgr_var_idx++){
if(!strcmp(rgr_lst[rgr_var_idx].key,"grid") || !strcasecmp(rgr_lst[rgr_var_idx].key,"scrip")){
rgr->fl_grd=(char *)strdup(rgr_lst[rgr_var_idx].val);
rgr->flg_grd=True;
continue;
} /* !grid */
if(!strcmp(rgr_lst[rgr_var_idx].key,"hnt_dst") || !strcmp(rgr_lst[rgr_var_idx].key,"fl_hnt_dst")){
rgr->fl_hnt_dst=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !hnt_dst */
if(!strcmp(rgr_lst[rgr_var_idx].key,"hnt_src") || !strcmp(rgr_lst[rgr_var_idx].key,"fl_hnt_src")){
rgr->fl_hnt_src=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !hnt_src */
if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_var") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_var") || !strcmp(rgr_lst[rgr_var_idx].key,"mask") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_variable")){
rgr->msk_var=(char *)strdup(rgr_lst[rgr_var_idx].val);
rgr->flg_msk_out=True;
continue;
} /* !msk_var */
if(!strcmp(rgr_lst[rgr_var_idx].key,"msh") || !strcmp(rgr_lst[rgr_var_idx].key,"mesh")){
rgr->fl_msh=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !msh */
if(!strcmp(rgr_lst[rgr_var_idx].key,"skl")){
rgr->fl_skl=(char *)strdup(rgr_lst[rgr_var_idx].val);
rgr->flg_grd=True;
continue;
} /* !skl */
if(!strcasecmp(rgr_lst[rgr_var_idx].key,"ugrid")){
rgr->fl_ugrid=(char *)strdup(rgr_lst[rgr_var_idx].val);
rgr->flg_nfr=True;
continue;
} /* !ugrid */
if(!strcasecmp(rgr_lst[rgr_var_idx].key,"fl_hrz") || !strcasecmp(rgr_lst[rgr_var_idx].key,"hrz")){
rgr->fl_hrz=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !hrz */
if(!strcasecmp(rgr_lst[rgr_var_idx].key,"fl_vrt") || !strcasecmp(rgr_lst[rgr_var_idx].key,"vrt")){
rgr->fl_vrt=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !vrt */
if(!strcmp(rgr_lst[rgr_var_idx].key,"no_area") || !strcmp(rgr_lst[rgr_var_idx].key,"no_area_out")){
rgr->flg_area_out=False;
continue;
} /* !area */
if(!strcmp(rgr_lst[rgr_var_idx].key,"no_msk") || !strcmp(rgr_lst[rgr_var_idx].key,"no_msk_out") || !strcmp(rgr_lst[rgr_var_idx].key,"no_mask") || !strcmp(rgr_lst[rgr_var_idx].key,"no_mask_out")){
rgr->flg_msk_out=False;
continue;
} /* !msk */
if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_apl") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_apply")){
rgr->flg_msk_apl=True;
/* Ensure masked fields regridded with TR maps have _FillValue to guarantee BFB arithmetic
with masked fields regridded with other maps that adhere to SCRIP/ESMF mask rules */
rgr->flg_add_fll=True;
continue;
} /* !msk_apl */
if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_out") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_out")){
rgr->flg_msk_out=True;
continue;
} /* !mask */
if(!strcmp(rgr_lst[rgr_var_idx].key,"add_fll") || !strcmp(rgr_lst[rgr_var_idx].key,"add_fill_value") || !strcmp(rgr_lst[rgr_var_idx].key,"fll_mpt") || !strcmp(rgr_lst[rgr_var_idx].key,"fill_empty")){
rgr->flg_add_fll=True;
continue;
} /* !add_fll */
if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_measures") || !strcmp(rgr_lst[rgr_var_idx].key,"cll_msr")){
rgr->flg_cll_msr=True;
continue;
} /* !cell_measures */
if(!strcmp(rgr_lst[rgr_var_idx].key,"no_cell_measures") || !strcmp(rgr_lst[rgr_var_idx].key,"no_cll_msr")){
rgr->flg_cll_msr=False;
continue;
} /* !cell_measures */
if(!strcmp(rgr_lst[rgr_var_idx].key,"curvilinear") || !strcmp(rgr_lst[rgr_var_idx].key,"crv")){
rgr->flg_crv=True;
continue;
} /* !curvilinear */
if(!strcmp(rgr_lst[rgr_var_idx].key,"diagnose_area") || !strcmp(rgr_lst[rgr_var_idx].key,"dgn_area")){
rgr->flg_dgn_area=True;
continue;
} /* !diagnose_area */
if(!strcmp(rgr_lst[rgr_var_idx].key,"diagnose_bounds") || !strcmp(rgr_lst[rgr_var_idx].key,"dgn_bnd")){
rgr->flg_dgn_bnd=True;
continue;
} /* !diagnose_bounds */
if(!strcmp(rgr_lst[rgr_var_idx].key,"cf_units") || !strcmp(rgr_lst[rgr_var_idx].key,"CF_units")){
rgr->flg_cf_units=True;
rgr->flg_erwg_units=False;
continue;
} /* !erwg_units */
if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_area_quad")){
rgr->area_mth=2;
continue;
} /* !area_nco */
if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_area_nco")){
rgr->area_mth=1;
continue;
} /* !area_nco */
if(!strcmp(rgr_lst[rgr_var_idx].key,"edg_typ") || !strcmp(rgr_lst[rgr_var_idx].key,"tri_arc") || !strcmp(rgr_lst[rgr_var_idx].key,"vrt_cnc")){
if(!strcasecmp(rgr_lst[rgr_var_idx].val,"grt_crc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"gtc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"great_circle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"geodesic") || !strcasecmp(rgr_lst[rgr_var_idx].val,"orthodrome")){
rgr->edg_typ=nco_edg_gtc;
}else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"sml_crc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ltr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"small_circle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"latitude_triangle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"true")){
rgr->edg_typ=nco_edg_smc;
(void)fprintf(stderr,"%s: WARNING Requested to run with small-circle edges. This option has not yet been tested and validated. Use only at your own risk.\n",nco_prg_nm_get());
}else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"crt") || !strcasecmp(rgr_lst[rgr_var_idx].val,"cartesian") || !strcasecmp(rgr_lst[rgr_var_idx].val,"planar") || !strcasecmp(rgr_lst[rgr_var_idx].val,"flat")){
rgr->edg_typ=nco_edg_crt;
}else{
(void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
abort();
} /* !val */
continue;
} /* !edg_typ */
if(!strcmp(rgr_lst[rgr_var_idx].key,"erwg_units") || !strcmp(rgr_lst[rgr_var_idx].key,"esmf_units") || !strcmp(rgr_lst[rgr_var_idx].key,"degrees")){
rgr->flg_cf_units=False;
rgr->flg_erwg_units=True;
continue;
} /* !erwg_units */
if(!strcmp(rgr_lst[rgr_var_idx].key,"infer") || !strcmp(rgr_lst[rgr_var_idx].key,"nfr")){
rgr->flg_nfr=True;
continue;
} /* !infer */
if(!strcmp(rgr_lst[rgr_var_idx].key,"no_stagger") || !strcmp(rgr_lst[rgr_var_idx].key,"no_stg")){
rgr->flg_stg=False;
continue;
} /* !stagger */
if(!strcmp(rgr_lst[rgr_var_idx].key,"grd_ttl") || !strcmp(rgr_lst[rgr_var_idx].key,"ttl")){
if(rgr->grd_ttl) rgr->grd_ttl=(char *)nco_free(rgr->grd_ttl);
rgr->grd_ttl=(char *)strdup(rgr_lst[rgr_var_idx].val);
/* 20180828 Replace unquoted tildes with spaces (like LaTeX, NCL) so ncremap users can put tildes in place of spaces in ttl
20180905 Reverted this since quoting command in ncremap is superior solution */
if(False){
size_t ttl_lng=strlen(rgr->grd_ttl);
for(size_t ttl_idx=0L;ttl_idx<ttl_lng;ttl_idx++)
if(rgr->grd_ttl[ttl_idx] == '~'){
if(ttl_idx == 0L) rgr->grd_ttl[ttl_idx]=' '; // Always convert tilde to space if first character
else if(rgr->grd_ttl[ttl_idx-1L] != '\\') rgr->grd_ttl[ttl_idx]=' '; // Convert tilde in other locations unless backslash-quoted
} /* !tilde */
} /* !0 */
continue;
} /* !grd_ttl */
if(!strcmp(rgr_lst[rgr_var_idx].key,"idx_dbg")){
rgr->idx_dbg=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd);
continue;
} /* !idx_dbg */
if(!strcmp(rgr_lst[rgr_var_idx].key,"latlon")){
cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%ld,%ld",&rgr->lat_nbr,&rgr->lon_nbr);
assert(cnv_nbr == 2);
continue;
} /* !latlon */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lonlat")){
cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%ld,%ld",&rgr->lon_nbr,&rgr->lat_nbr);
assert(cnv_nbr == 2);
continue;
} /* !lonlat */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nbr")){
rgr->lat_nbr=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd);
continue;
} /* !lat_nbr */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nbr")){
rgr->lon_nbr=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd);
continue;
} /* !lon_nbr */
if(!strcasecmp(rgr_lst[rgr_var_idx].key,"snwe")){
cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%lf,%lf,%lf,%lf",&rgr->lat_sth,&rgr->lat_nrt,&rgr->lon_wst,&rgr->lon_est);
if(cnv_nbr != 4) (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
assert(cnv_nbr == 4);
if(cnv_nbr != 4) abort(); /* CEWI Use cnv_nbr at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */
continue;
} /* !snwe */
if(!strcasecmp(rgr_lst[rgr_var_idx].key,"wesn")){
  /* Parse bounding box given in West,East,South,North order.
     Bugfix: previous code guarded the sscanf() with "if(cnv_nbr != 4)", but cnv_nbr
     here holds a stale value from an earlier key (latlon/lonlat/snwe) or is
     uninitialized, so a prior successful 4-field parse silently skipped parsing the
     user-supplied wesn values. Parse unconditionally and diagnose failure, exactly
     as the parallel "snwe" handler above does. */
  cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%lf,%lf,%lf,%lf",&rgr->lon_wst,&rgr->lon_est,&rgr->lat_sth,&rgr->lat_nrt);
  if(cnv_nbr != 4) (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
  assert(cnv_nbr == 4);
  if(cnv_nbr != 4) abort(); /* CEWI Use cnv_nbr at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */
  continue;
} /* !wesn */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_crv")){
rgr->lat_crv=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
continue;
} /* !lat_crv */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_crv")){
rgr->lon_crv=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
continue;
} /* !lon_crv */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_sth")){
rgr->lat_sth=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
// rgr->lat_typ=nco_grd_lat_bb;
continue;
} /* !lat_sth */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_wst")){
rgr->lon_wst=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
rgr->lon_typ=nco_grd_lon_bb;
continue;
} /* !lon_wst */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nrt")){
rgr->lat_nrt=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
//rgr->lat_typ=nco_grd_lat_bb;
continue;
} /* !lat_nrt */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_est")){
rgr->lon_est=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
rgr->lon_typ=nco_grd_lon_bb;
continue;
} /* !lon_est */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_drc")){
if(!strcasecmp(rgr_lst[rgr_var_idx].val,"s2n") || !strcasecmp(rgr_lst[rgr_var_idx].val,"south2north") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ston") || !strcasecmp(rgr_lst[rgr_var_idx].val,"southnorth")){
rgr->lat_drc=nco_grd_lat_drc_s2n;
}else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"n2s") || !strcasecmp(rgr_lst[rgr_var_idx].val,"north2south") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ntos") || !strcasecmp(rgr_lst[rgr_var_idx].val,"northsouth")){
rgr->lat_drc=nco_grd_lat_drc_n2s;
}else{
(void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
abort();
} /* !val */
continue;
} /* !lat_drc */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_typ")){
if(!strcasecmp(rgr_lst[rgr_var_idx].val,"cap") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fv") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fix") || !strcasecmp(rgr_lst[rgr_var_idx].val,"yarmulke")){
rgr->lat_typ=nco_grd_lat_fv;
rgr->grd_typ=nco_grd_2D_fv;
}else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"eqa") || !strcasecmp(rgr_lst[rgr_var_idx].val,"rgl") || !strcasecmp(rgr_lst[rgr_var_idx].val,"unf") || !strcasecmp(rgr_lst[rgr_var_idx].val,"uni")){
rgr->lat_typ=nco_grd_lat_eqa;
rgr->grd_typ=nco_grd_2D_eqa;
}else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"gss")){
rgr->lat_typ=nco_grd_lat_gss;
rgr->grd_typ=nco_grd_2D_gss;
}else{
(void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
abort();
} /* !val */
continue;
} /* !lat_typ */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_typ")){
if(!strcasecmp(rgr_lst[rgr_var_idx].val,"180_wst") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wst_180"))
rgr->lon_typ=nco_grd_lon_180_wst;
else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"180_ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ctr_180"))
rgr->lon_typ=nco_grd_lon_180_ctr;
else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"Grn_wst") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wst_Grn"))
rgr->lon_typ=nco_grd_lon_Grn_wst;
else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"Grn_ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ctr_Grn"))
rgr->lon_typ=nco_grd_lon_Grn_ctr;
else{
(void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
abort();
} /* !val */
continue;
} /* !lon_typ */
if(!strcmp(rgr_lst[rgr_var_idx].key,"area_nm")){
rgr->area_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !area_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"bnd_nm")){
rgr->bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !bnd_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"bnd_tm_nm")){
rgr->bnd_tm_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !bnd_tm_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"col_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"col_nm")){
rgr->col_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !col_nm_in */
if(!strcmp(rgr_lst[rgr_var_idx].key,"col_nm_out")){
rgr->col_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !col_nm_out */
if(!strcmp(rgr_lst[rgr_var_idx].key,"frc_nm")){
rgr->frc_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !frc_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm")){
rgr->ilev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !ilev_nm_in */
if(!strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm_out")){
rgr->ilev_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !ilev_nm_out */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_bnd_nm")){
rgr->lat_bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lat_bnd_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_dmn_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"lat_dmn")){
rgr->lat_dmn_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lat_dmn_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lat_nm")){
rgr->lat_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lat_nm_in */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nm_out")){
rgr->lat_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lat_nm_out */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_vrt_nm")){
rgr->lat_vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lat_vrt_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_wgt_nm")){
rgr->lat_wgt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lat_wgt_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lev_nm")){
rgr->lev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lev_nm_in */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lev_nm_out")){
rgr->lev_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lev_nm_out */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_bnd_nm")){
rgr->lon_bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lon_bnd_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_dmn_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"lon_dmn")){
rgr->lon_dmn_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lon_dmn_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lon_nm")){
rgr->lon_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lon_nm_in */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nm_out")){
rgr->lon_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lon_nm_out */
if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_vrt_nm")){
rgr->lon_vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !lon_vrt_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"plev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"plev_nm")){
rgr->plev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !plev_nm_in */
if(!strcmp(rgr_lst[rgr_var_idx].key,"ply_tri")){
if(!strcasecmp(rgr_lst[rgr_var_idx].val,"csz")){
rgr->ply_tri_mth=nco_ply_tri_mth_csz;
}else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"centroid") || !strcasecmp(rgr_lst[rgr_var_idx].val,"snl") || !strcasecmp(rgr_lst[rgr_var_idx].val,"mat")){
rgr->ply_tri_mth=nco_ply_tri_mth_ctr;
}else{
(void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
abort();
} /* !val */
continue;
} /* !ply_tri */
if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_frc_nm")){
rgr->sgs_frc_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !sgs_frc */
if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_msk_nm")){
rgr->sgs_msk_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !sgs_msk */
if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_nrm")){
rgr->sgs_nrm=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
continue;
} /* !sgs_nrm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"tst")){
rgr->tst=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd);
continue;
} /* !tst */
if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_nm")){
rgr->msk_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
rgr->flg_msk_out=True;
continue;
} /* !msk_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_nm")){
rgr->vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val);
continue;
} /* !vrt_nm */
if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_ntp") || !strcmp(rgr_lst[rgr_var_idx].key,"ntp_mth")){
if(!strcasecmp(rgr_lst[rgr_var_idx].val,"lin") || !strcasecmp(rgr_lst[rgr_var_idx].val,"linear") || !strcasecmp(rgr_lst[rgr_var_idx].val,"lnr")){
rgr->ntp_mth=nco_ntp_lnr;
}else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"log") || !strcasecmp(rgr_lst[rgr_var_idx].val,"logarithmic") || !strcasecmp(rgr_lst[rgr_var_idx].val,"lgr")){
rgr->ntp_mth=nco_ntp_log;
}else{
(void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
abort();
} /* !val */
continue;
} /* !ntp_mth */
if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_mth")){
if(!strcasecmp(rgr_lst[rgr_var_idx].val,"nrs_ngh") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ngh") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nearest_neighbor") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nn")){
rgr->xtr_mth=nco_xtr_fll_ngh;
}else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"mss_val") || !strcasecmp(rgr_lst[rgr_var_idx].val,"msv") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fll_val") || !strcasecmp(rgr_lst[rgr_var_idx].val,"missing_value")){
rgr->xtr_mth=nco_xtr_fll_msv;
}else{
(void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
abort();
} /* !val */
continue;
} /* !xtr_mth */
if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_nsp") || !strcmp(rgr_lst[rgr_var_idx].key,"xtr_nbr_src_pnt") || !strcmp(rgr_lst[rgr_var_idx].key,"number_source_points") || !strcmp(rgr_lst[rgr_var_idx].key,"extrapolation_number_source_points")){
rgr->xtr_nsp=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd);
continue;
} /* !xtr_nsp */
if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_xpn") || !strcmp(rgr_lst[rgr_var_idx].key,"extrapolation_exponent") || !strcmp(rgr_lst[rgr_var_idx].key,"exponent_of_distance_in_extrapolation")){
rgr->xtr_xpn=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
continue;
} /* !xtr_xpn */
if(!strcmp(rgr_lst[rgr_var_idx].key,"wgt_typ") || !strcmp(rgr_lst[rgr_var_idx].key,"weight_type")){
if(!strcasecmp(rgr_lst[rgr_var_idx].val,"con") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_con") || !strcasecmp(rgr_lst[rgr_var_idx].val,"conservative") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_con"))
rgr->wgt_typ=nco_wgt_con;
else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"idw") || !strcasecmp(rgr_lst[rgr_var_idx].val,"dwe") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_idw") || !strcasecmp(rgr_lst[rgr_var_idx].val,"distance_weighted") || !strcasecmp(rgr_lst[rgr_var_idx].val,"inverse_distance") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_idw"))
rgr->wgt_typ=nco_wgt_idw;
else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"bln") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_bln") || !strcasecmp(rgr_lst[rgr_var_idx].val,"bilinear") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_bln"))
rgr->wgt_typ=nco_wgt_bln;
else {
(void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
abort();
} /* !val */
continue;
} /* !wgt_typ */
(void)fprintf(stderr,"%s: ERROR %s reports unrecognized key-value option to --rgr switch: %s\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key);
nco_exit(EXIT_FAILURE);
} /* !rgr_var_idx */
/* Eliminate sticky wickets: Give nfr precedence over grd */
if(rgr->flg_nfr && rgr->flg_grd) rgr->flg_grd=False;
/* Revert to defaults for any names not specified on command-line */
if(!rgr->area_nm) rgr->area_nm=(char *)strdup("area"); /* [sng] Name of variable containing gridcell area */
if(!rgr->bnd_nm) rgr->bnd_nm=(char *)strdup("nvertices"); /* [sng] Name of dimension to employ for spatial bounds */
/* NB: CESM uses nbnd and ilev for temporal and vertical bounds, respectively (CESM outputs no horizontal spatial bounds). NCO defaults to nbnd for all bounds with two endpoints. */
if(!rgr->bnd_tm_nm) rgr->bnd_tm_nm=(char *)strdup("nbnd"); /* [sng] Name of dimension to employ for temporal bounds */
if(!rgr->col_nm_in) rgr->col_nm_in=(char *)strdup("ncol"); /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */
if(!rgr->frc_nm) rgr->frc_nm=(char *)strdup("frac_b"); /* [sng] Name of variable containing gridcell fraction */
if(!rgr->ilev_nm_in) rgr->ilev_nm_in=(char *)strdup("ilev"); /* [sng] Name of input dimension to recognize as vertical dimension at layer interfaces */
if(!rgr->lat_bnd_nm) rgr->lat_bnd_nm=(char *)strdup("lat_bnds"); /* [sng] Name of rectangular boundary variable for latitude */
if(!rgr->lat_nm_in) rgr->lat_nm_in=(char *)strdup("lat"); /* [sng] Name of input dimension to recognize as latitude */
if(!rgr->lev_nm_in) rgr->lev_nm_in=(char *)strdup("lev"); /* [sng] Name of input dimension to recognize as vertical dimension at layer midpoints */
if(!rgr->lat_vrt_nm) rgr->lat_vrt_nm=(char *)strdup("lat_vertices"); /* [sng] Name of non-rectangular boundary variable for latitude */
if(!rgr->lat_wgt_nm) rgr->lat_wgt_nm=(char *)strdup("gw"); /* [sng] Name of variable containing latitude weights */
if(!rgr->lon_bnd_nm) rgr->lon_bnd_nm=(char *)strdup("lon_bnds"); /* [sng] Name of rectangular boundary variable for longitude */
if(!rgr->lon_nm_in) rgr->lon_nm_in=(char *)strdup("lon"); /* [sng] Name of dimension to recognize as longitude */
if(!rgr->lon_vrt_nm) rgr->lon_vrt_nm=(char *)strdup("lon_vertices"); /* [sng] Name of non-rectangular boundary variable for longitude */
if(!rgr->msk_nm) rgr->msk_nm=(char *)strdup("mask_b"); /* [sng] Name of variable containing destination mask */
if(!rgr->vrt_nm) rgr->vrt_nm=(char *)strdup("nv"); /* [sng] Name of dimension to employ for vertices */
if(!rgr->plev_nm_in) rgr->plev_nm_in=(char *)strdup("plev"); /* [sng] Name of variable to recognize as pure pressure coordinate */
/* Derived from defaults and command-line arguments */
// On second thought, do not strdup() these here. This way, NULL means user never specified lon/lat-out names
// if(!rgr->col_nm_out) rgr->col_nm_out=(char *)strdup("ncol"); /* [sng] Name of dimension to output as horizontal spatial dimension on unstructured grid */
// if(!rgr->lat_nm_out) rgr->lat_nm_out=(char *)strdup("lat"); /* [sng] Name of dimension to output as latitude */
// if(!rgr->lon_nm_out) rgr->lon_nm_out=(char *)strdup("lon"); /* [sng] Name of dimension to output as longitude */
// if(!rgr->lat_nm_out) rgr->lat_nm_out=(char *)strdup(rgr_lat_nm_in); /* [sng] Name of output dimension for latitude */
// if(!rgr->lon_nm_out) rgr->lon_nm_out=(char *)strdup(rgr_lon_nm_in); /* [sng] Name of output dimension for longitude */
/* Free kvms */
if(rgr_lst) rgr_lst=nco_kvm_lst_free(rgr_lst,rgr_var_nbr);
return rgr;
} /* end nco_rgr_ini() */
int /* O [enm] Return code */
nco_ntp_vrt /* [fnc] Interpolate vertically */
(rgr_sct * const rgr, /* I/O [sct] Regridding structure */
trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */
{
/* Purpose: Interpolate fields to new vertical grid specified in a vertical file */
const char fnc_nm[]="nco_ntp_vrt()"; /* [sng] Function name */
char *fl_tpl; /* [sng] Template file (vertical grid file) */
char *fl_pth_lcl=NULL;
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int in_id; /* I [id] Input netCDF file ID */
int tpl_id; /* [id] Input netCDF file ID (for vertical grid template) */
int md_open; /* [enm] Mode flag for nc_open() call */
int out_id; /* I [id] Output netCDF file ID */
int rcd=NC_NOERR;
int dmn_idx; /* [idx] Dimension index */
int rec_idx; /* [idx] Record dimension index */
nco_bool FL_RTR_RMT_LCN;
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s obtaining vertical grid from %s\n",nco_prg_nm_get(),fnc_nm,rgr->fl_vrt);
/* Duplicate (because nco_fl_mk_lcl() free()'s its fl_in) */
fl_tpl=(char *)strdup(rgr->fl_vrt);
/* Make sure file is on local system and is readable or die trying */
fl_tpl=nco_fl_mk_lcl(fl_tpl,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
if(SHARE_OPEN) md_open=md_open|NC_SHARE;
rcd+=nco_fl_open(fl_tpl,md_open,&bfr_sz_hnt,&tpl_id);
/* Formula-terms for hybrid pressure vertical grid on unstructured CAM/EAM horizontal grid:
prs_mdp[time,lev,col]=P0*hyam[lev] +PS[time,col]*hybm[lev]
prs_ntf[time,lev,col]=P0*hyai[ilev]+PS[time,col]*hybi[ilev] */
/* Formula-terms for hybrid pressure vertical grid on ECMWF RLL horizontal grid:
prs_mdp[time,lev,lat,lon]=hyam[lev] +exp(lnsp[time,lat,lon])*hybm[lev]
prs_ntf[time,lev,lat,lon]=hyai[ilev]+exp(lnsp[time,lat,lon])*hybi[ilev] */
/* For simplicity and code re-use, all single-variable (not hybrid-variable) coordinate systems adopt "lev" semantics
This includes pure pressure coordinates and eventually will include sigma, depth, and height coordinates
Only hybrid coordinates will refer to the "ilev" levels and indices
All single coordinate systems will refer to "lev" levels and indices */
int dpt_id; /* [id] Ocean depth ID */
int hyai_id=NC_MIN_INT; /* [id] Hybrid A coefficient at layer interfaces ID */
int hyam_id=NC_MIN_INT; /* [id] Hybrid A coefficient at layer midpoints ID */
int hybi_id=NC_MIN_INT; /* [id] Hybrid B coefficient at layer interfaces ID */
int hybm_id=NC_MIN_INT; /* [id] Hybrid B coefficient at layer midpoints ID */
int ilev_id=NC_MIN_INT; /* [id] Interface pressure ID */
int lev_id=NC_MIN_INT; /* [id] Midpoint pressure ID */
int p0_id=NC_MIN_INT; /* [id] Reference pressure ID */
int ps_id=NC_MIN_INT; /* [id] Surface pressure ID */
int plev_id; /* [id] Air pressure ID */
nco_bool flg_grd_hyb_cameam=False; /* [flg] Hybrid coordinate vertical grid uses CAM/EAM conventions */
nco_bool flg_grd_hyb_ecmwf=False; /* [flg] Hybrid coordinate vertical grid uses ECMWF conventions */
nco_bool flg_grd_in_dpt=False; /* [flg] Input depth coordinate vertical grid */
nco_bool flg_grd_in_hyb=False; /* [flg] Input hybrid coordinate vertical grid */
nco_bool flg_grd_in_prs=False; /* [flg] Input pressure coordinate vertical grid */
nco_bool flg_grd_out_dpt=False; /* [flg] Output depth coordinate vertical grid */
nco_bool flg_grd_out_hyb=False; /* [flg] Output hybrid coordinate vertical grid */
nco_bool flg_grd_out_prs=False; /* [flg] Output pressure coordinate vertical grid */
nco_bool flg_vrt_tm=False; /* [flg] Output depends on time-varying vertical grid */
nco_grd_vrt_typ_enm nco_vrt_grd_in=nco_vrt_grd_nil; /* [enm] Vertical grid type for input grid */
nco_grd_vrt_typ_enm nco_vrt_grd_out=nco_vrt_grd_nil; /* [enm] Vertical grid type for output grid */
nco_ntp_typ_enm ntp_mth=rgr->ntp_mth; /* [enm] Interpolation method */
nco_xtr_typ_enm xtr_mth=rgr->xtr_mth; /* [enm] Extrapolation method */
/* Determine output grid type */
if((rcd=nco_inq_varid_flg(tpl_id,"hyai",&hyai_id)) == NC_NOERR){
nco_vrt_grd_out=nco_vrt_grd_hyb; /* EAM */
flg_grd_out_hyb=True;
}else if((rcd=nco_inq_varid_flg(tpl_id,"plev",&plev_id)) == NC_NOERR){
nco_vrt_grd_out=nco_vrt_grd_prs; /* NCEP */
flg_grd_out_prs=True;
}else if((rcd=nco_inq_varid_flg(tpl_id,"depth",&dpt_id)) == NC_NOERR){
nco_vrt_grd_out=nco_vrt_grd_dpt; /* MPAS */
flg_grd_out_dpt=True;
}else{ /* !hyai */
(void)fprintf(stdout,"%s: ERROR %s Unable to locate hybrid-sigma/pressure or pure-pressure vertical grid coordinate information in vertical grid file\n",nco_prg_nm_get(),fnc_nm);
(void)fprintf(stdout,"%s: HINT ensure vertical grid coordinate file contains a valid vertical grid coordinate\n",nco_prg_nm_get());
return NCO_ERR;
} /* !hyai */
if(flg_grd_out_hyb){
rcd=nco_inq_varid(tpl_id,"hyai",&hyai_id);
rcd=nco_inq_varid(tpl_id,"hyam",&hyam_id);
rcd=nco_inq_varid(tpl_id,"hybi",&hybi_id);
rcd=nco_inq_varid(tpl_id,"hybm",&hybm_id);
rcd=nco_inq_varid(tpl_id,"P0",&p0_id);
rcd=nco_inq_varid_flg(tpl_id,"ilev",&ilev_id);
rcd=nco_inq_varid_flg(tpl_id,"lev",&lev_id);
rcd=nco_inq_varid_flg(tpl_id,"PS",&ps_id);
} /* !flg_grd_out_hyb */
if(flg_grd_out_prs){
rcd=nco_inq_varid(tpl_id,"plev",&lev_id);
} /* !flg_grd_out_prs */
if(flg_grd_out_dpt){
rcd=nco_inq_varid(tpl_id,"depth",&lev_id);
} /* !flg_grd_out_dpt */
const int hyai_id_tpl=hyai_id; /* [id] Hybrid A coefficient at layer interfaces ID */
const int hyam_id_tpl=hyam_id; /* [id] Hybrid A coefficient at layer midpoints ID */
const int hybi_id_tpl=hybi_id; /* [id] Hybrid B coefficient at layer interfaces ID */
const int hybm_id_tpl=hybm_id; /* [id] Hybrid B coefficient at layer midpoints ID */
const int p0_id_tpl=p0_id; /* [id] Reference pressure ID */
const int ilev_id_tpl=ilev_id; /* [id] Interface pressure ID */
const int lev_id_tpl=lev_id; /* [id] Midpoint pressure ID */
const int ps_id_tpl=ps_id; /* [id] Surface pressure ID */
char *ilev_nm_in=NULL; /* [sng] Interface level name */
char *lev_nm_in;
char *ilev_nm_out;
char *lev_nm_out;
char *plev_nm_in; /* [sng] Pure-pressure coordinate name */
char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */
int *dmn_ids_in=NULL; /* [nbr] Input file dimension IDs */
int *dmn_ids_out=NULL; /* [nbr] Output file dimension IDs */
int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */
int dmn_nbr_ps; /* [nbr] Number of dimensions in PS variable */
int dmn_nbr_in; /* [nbr] Number of dimensions in input file */
int dmn_nbr_out; /* [nbr] Number of dimensions in output file */
int dmn_id_ilev_out=NC_MIN_INT; /* [id] Dimension ID for interface level in output file */
int dmn_id_lev_out=NC_MIN_INT; /* [id] Dimension ID for midpoint level in output file */
int dmn_id_ilev_in=NC_MIN_INT; /* [id] Dimension ID for interface level in file to be interpolated */
int dmn_id_lev_in=NC_MIN_INT; /* [id] Dimension ID for midpoint level in file to be interpolated */
int dmn_id_tm_in=NC_MIN_INT; /* [id] Dimension ID for time in file to be interpolated */
int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */
int dmn_idx_tm_in=NC_MIN_INT; /* [idx] Index of record coordinate in input hybrid coordinate PS field */
long *dmn_cnt_in=NULL;
long *dmn_cnt_out=NULL;
long *dmn_srt=NULL;
long ilev_nbr_in;
long lev_nbr_in;
long ilev_nbr_out;
long lev_nbr_out;
long tm_idx=0L; /* [idx] Current timestep */
long tm_nbr=1L; /* [idx] Number of timesteps in vertical grid */
long tm_nbr_in=1L; /* [nbr] Number of timesteps in input vertical grid definition */
long tm_nbr_out=1L; /* [nbr] Number of timesteps in output vertical grid definition */
size_t grd_idx; /* [idx] Gridcell index */
size_t grd_sz_in=1L; /* [nbr] Number of elements in single layer of input grid */
size_t grd_sz_out=1L; /* [nbr] Number of elements in single layer of output grid */
size_t idx_fst; /* [idx] Index-offset to current surface pressure timeslice */
if(flg_grd_out_hyb){
/* Interrogate hyai/hyam to obtain ilev/lev dimensions */
rcd=nco_inq_vardimid(tpl_id,hyai_id,&dmn_id_ilev_out);
rcd=nco_inq_vardimid(tpl_id,hyam_id,&dmn_id_lev_out);
rcd=nco_inq_dimlen(tpl_id,dmn_id_ilev_out,&ilev_nbr_out);
rcd=nco_inq_dimlen(tpl_id,dmn_id_lev_out,&lev_nbr_out);
rcd=nco_inq_dimname(tpl_id,dmn_id_ilev_out,dmn_nm);
ilev_nm_out=strdup(dmn_nm);
rcd=nco_inq_dimname(tpl_id,dmn_id_lev_out,dmn_nm);
lev_nm_out=strdup(dmn_nm);
/* Interrogate PS, if any, for horizontal dimensions */
if(ps_id_tpl != NC_MIN_INT){
rcd=nco_inq_varndims(tpl_id,ps_id,&dmn_nbr_ps);
dmn_nbr_out=dmn_nbr_ps;
dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int));
dmn_cnt_out=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long));
dmn_srt=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long));
rcd=nco_inq_vardimid(tpl_id,ps_id,dmn_ids_out);
rcd=nco_inq_unlimdims(tpl_id,&dmn_nbr_rec,(int *)NULL);
if(dmn_nbr_rec > 0){
dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int));
rcd=nco_inq_unlimdims(tpl_id,&dmn_nbr_rec,dmn_ids_rec);
} /* !dmn_nbr_rec */
for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){
rcd=nco_inq_dimlen(tpl_id,dmn_ids_out[dmn_idx],dmn_cnt_out+dmn_idx);
/* 20190330: Allow possibility that PS has time dimension > 1
We want horizontal not temporal dimensions to contribute to grd_sz
Temporal dimension is usually unlimited
Only multiply grd_sz by fixed (non-unlimited) dimension sizes
Corner-case exception when PS spatial dimension on unstructured grid is unlimited */
for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++)
if(dmn_ids_out[dmn_idx] == dmn_ids_rec[rec_idx])
break;
if(rec_idx == dmn_nbr_rec || dmn_nbr_out == 1) grd_sz_out*=dmn_cnt_out[dmn_idx];
if(rec_idx != dmn_nbr_rec && dmn_nbr_out > 1 && dmn_cnt_out[dmn_idx] > 1L){
tm_nbr_out=dmn_cnt_out[dmn_idx];
if(tm_nbr_out > 1L) flg_vrt_tm=True;
} /* tm_nbr_out > 1 */
dmn_srt[dmn_idx]=0L;
} /* !dmn_idx */
if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec);
} /* !ps_id_tpl */
} /* !flg_grd_out_hyb */
if(flg_grd_out_prs){
/* Interrogate plev to obtain plev dimensions */
rcd=nco_inq_vardimid(tpl_id,lev_id,&dmn_id_lev_out);
rcd=nco_inq_dimlen(tpl_id,dmn_id_lev_out,&lev_nbr_out);
rcd=nco_inq_dimname(tpl_id,dmn_id_lev_out,dmn_nm);
ilev_nbr_out=lev_nbr_out;
} /* !flg_grd_out_prs */
double *hyai_out=NULL; /* [frc] Hybrid A coefficient at layer interfaces on output grid */
double *hyam_out=NULL; /* [frc] Hybrid A coefficient at layer midpoints on output grid */
double *hybi_out=NULL; /* [frc] Hybrid B coefficient at layer interfaces on output grid */
double *hybm_out=NULL; /* [frc] Hybrid B coefficient at layer midpoints on output grid */
double *ilev_out=NULL; /* [hPa] Interface pressure on output grid */
double *lev_out=NULL; /* [hPa] Midpoint pressure on output grid */
double *ps_out=NULL; /* [Pa] Surface pressure on output grid */
double *prs_mdp_out=NULL; /* [Pa] Midpoint pressure on output grid */
double *prs_ntf_out=NULL; /* [Pa] Interface pressure on output grid */
double p0_out; /* [Pa] Reference pressure on output grid */
long ilev_idx; /* [idx] Interface level index */
long lev_idx; /* [idx] Level index */
const nc_type crd_typ_out=NC_DOUBLE;
nc_type var_typ_rgr; /* [enm] Variable type used during regridding */
var_typ_rgr=NC_DOUBLE; /* NB: Perform interpolation in double precision */
if(flg_grd_out_hyb){
hyai_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr));
hyam_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr));
hybi_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr));
hybm_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr));
ilev_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr));
lev_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr));
rcd=nco_get_var(tpl_id,hyai_id,hyai_out,crd_typ_out);
rcd=nco_get_var(tpl_id,hyam_id,hyam_out,crd_typ_out);
rcd=nco_get_var(tpl_id,hybi_id,hybi_out,crd_typ_out);
rcd=nco_get_var(tpl_id,hybm_id,hybm_out,crd_typ_out);
rcd=nco_get_var(tpl_id,p0_id,&p0_out,crd_typ_out);
if(ilev_id_tpl != NC_MIN_INT){
rcd=nco_get_var(tpl_id,ilev_id,ilev_out,crd_typ_out);
}else{
/* p0 is in Pa but ilev traditionally given in hPa */
for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) ilev_out[ilev_idx]=p0_out*(hyai_out[ilev_idx]+hybi_out[ilev_idx])/100.0;
} /* !ilev_id_tpl */
if(lev_id_tpl != NC_MIN_INT){
rcd=nco_get_var(tpl_id,lev_id,lev_out,crd_typ_out);
}else{
/* p0 is in Pa but lev traditionally given in hPa */
for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) lev_out[lev_idx]=p0_out*(hyam_out[lev_idx]+hybm_out[lev_idx])/100.0;
} /* !lev_id_tpl */
} /* !flg_grd_out_hyb */
if(flg_grd_out_prs){
lev_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr));
rcd=nco_get_var(tpl_id,lev_id,lev_out,crd_typ_out);
} /* !flg_grd_out_prs */
/* For vertical interpolation (unlike horizontal regridding), the destination grid is known a priori
Straightforward copy all variables and attributes that define grid from fl_tpl to output
would work in theory, but would not allow dynamic identification and relabeling of names */
/* if(flg_grd_out_hyb){
const int vrt_grd_lst_nbr=8;
const char *vrt_grd_lst[]={"/hyai","/hyam","/hybi","/hybm","/ilev","/lev","/P0","/PS"};
}
if(flg_grd_out_prs){
const int vrt_grd_lst_nbr=1;
const char *vrt_grd_lst[]={"/plev"};
} */
/* Above this line, fl_tpl and tpl_id refer to vertical coordinate file (i.e., template file)
Below this line, fl_in and in_id refer to input file to be vertically regridded
Do not close template file until all grid variables have been copied
For maximum efficiency, do this after defining all interpolated variables in output
That way no file needs to exit define mode or enter data mode more than once
However this requires keeping template file, input data file, and output file simultaneously open */
in_id=rgr->in_id;
out_id=rgr->out_id;
/* Determine input grid type */
if(rgr->plev_nm_in) plev_nm_in=rgr->plev_nm_in;
if((rcd=nco_inq_varid_flg(in_id,"hyai",&hyai_id)) == NC_NOERR){
nco_vrt_grd_in=nco_vrt_grd_hyb; /* EAM */
flg_grd_in_hyb=True;
}else if((rcd=nco_inq_varid_flg(in_id,plev_nm_in,&plev_id)) == NC_NOERR){
nco_vrt_grd_in=nco_vrt_grd_prs; /* NCEP */
flg_grd_in_prs=True;
}else if((rcd=nco_inq_varid_flg(in_id,"depth",&dpt_id)) == NC_NOERR){
nco_vrt_grd_in=nco_vrt_grd_dpt; /* MPAS */
flg_grd_in_dpt=True;
}else{ /* !hyai */
(void)fprintf(stdout,"%s: ERROR %s Unable to locate hybrid-sigma/pressure or pure-pressure vertical grid coordinate information in input file\n",nco_prg_nm_get(),fnc_nm);
(void)fprintf(stdout,"%s: HINT only invoke vertical interpolation on files that contain variables with vertical dimensions, and with known vertical coordinate variable names. These default to \"hyai\" for hybrid, \"plev\" for pressure, \"depth\" for depth. See http://nco.sf.net/nco.html#lev_nm for options to change these names at run-time, e.g., \"--rgr plev_nm=vrt_nm\"\n",nco_prg_nm_get());
return NCO_ERR;
} /* !hyai */
/* Sanity checks: One type of input and one type of output grid detected */
assert(!(flg_grd_in_hyb && flg_grd_in_prs));
assert(!(flg_grd_in_hyb && flg_grd_in_dpt));
assert(!(flg_grd_in_prs && flg_grd_in_dpt));
assert(flg_grd_in_hyb || flg_grd_in_prs || flg_grd_in_dpt);
assert(!(flg_grd_out_hyb && flg_grd_out_prs));
assert(!(flg_grd_out_hyb && flg_grd_out_dpt));
assert(!(flg_grd_out_prs && flg_grd_out_dpt));
assert(flg_grd_out_hyb || flg_grd_out_prs || flg_grd_out_dpt);
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG Input grid flags : flg_grd_in_hyb = %d, flg_grd_in_prs = %d, flg_grd_in_dpt = %d\n",nco_prg_nm_get(),flg_grd_in_hyb,flg_grd_in_prs,flg_grd_in_dpt);
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG Output grid flags: flg_grd_out_hyb = %d, flg_grd_out_prs = %d, flg_grd_out_dpt = %d\n",nco_prg_nm_get(),flg_grd_out_hyb,flg_grd_out_prs,flg_grd_out_dpt);
/* 20191219: This block is not used, deprecate it? Or use once new coordinates like altitude, depth supported? */
nco_vrt_ntp_typ_enm nco_vrt_ntp_typ=nco_ntp_nil; /* Vertical interpolation type */
if(nco_vrt_grd_in == nco_vrt_grd_hyb && nco_vrt_grd_out == nco_vrt_grd_hyb) nco_vrt_ntp_typ=nco_ntp_hyb_to_hyb;
if(nco_vrt_grd_in == nco_vrt_grd_hyb && nco_vrt_grd_out == nco_vrt_grd_prs) nco_vrt_ntp_typ=nco_ntp_hyb_to_prs;
if(nco_vrt_grd_in == nco_vrt_grd_prs && nco_vrt_grd_out == nco_vrt_grd_hyb) nco_vrt_ntp_typ=nco_ntp_prs_to_hyb;
if(nco_vrt_grd_in == nco_vrt_grd_prs && nco_vrt_grd_out == nco_vrt_grd_prs) nco_vrt_ntp_typ=nco_ntp_prs_to_prs;
assert(nco_vrt_ntp_typ != nco_ntp_nil);
/* Variables on input grid, i.e., on grid in data file to be interpolated */
if(flg_grd_in_hyb){
rcd=nco_inq_varid(in_id,"hyai",&hyai_id);
rcd=nco_inq_varid(in_id,"hyam",&hyam_id);
rcd=nco_inq_varid(in_id,"hybi",&hybi_id);
rcd=nco_inq_varid(in_id,"hybm",&hybm_id);
/* 20190602: ECMWF hybrid vertical grid parameters and dimensions differ from CAM/EAM:
ECMWF defines vertical dimensions "nhym" and "nhyi" specifically for hy[ab][im] and uses "lev" and "lev_2" for all other variables, whereas CAM/EAM uses same dimensions "lev" and "ilev" for all vertical variables including hybrid coefficients
ECMWF provides "hya?" as a constant in Pa and "hyb?" as a dimensionless coefficient of PS, whereas CAM/EAM provides "hya?" and "hyb?" both as dimensionless coefficients of P0 and PS
ECMWF provides "lev" and "lev_2" with midpoint and surface pressure indices (not values), respectively, whereas CAM/EAM provides "lev" and "ilev" coordinate values in hPa
ECMWF provides dimensionless "lnsp" for log(surface pressure) whereas CAM/EAM provides "PS" for surface pressure in Pa
ECMWF "lnsp" has degenerate level dimension "lev_2" whereas CAM/EAM "PS" has no "ilev" dimension
ECMWF uses hya? instead of reference pressure whereas CAM/EAM provides "P0" in hPa */
if((rcd=nco_inq_varid_flg(in_id,"lnsp",&ps_id)) == NC_NOERR) flg_grd_hyb_ecmwf=True;
else if((rcd=nco_inq_varid_flg(in_id,"PS",&ps_id)) == NC_NOERR) flg_grd_hyb_cameam=True;
else{
(void)fprintf(stderr,"%s: ERROR %s Unable to find surface pressure variable required for hybrid grid in input file\n",nco_prg_nm_get(),fnc_nm);
abort();
} /* !rcd */
if(flg_grd_hyb_cameam){
rcd=nco_inq_varid(in_id,"P0",&p0_id);
ilev_id=NC_MIN_INT;
lev_id=NC_MIN_INT;
if(ilev_id_tpl == NC_MIN_INT) rcd=nco_inq_varid_flg(in_id,"ilev",&ilev_id);
if(lev_id_tpl == NC_MIN_INT) rcd=nco_inq_varid_flg(in_id,"lev",&lev_id);
} /* !flg_grd_hyb_cameam */
/* 20190603: We require ECMWF IFS input to have a "lev" coordinate so we can use "lev" dimension not "nhyb" */
if(flg_grd_hyb_ecmwf)
rcd=nco_inq_varid(in_id,"lev",&lev_id);
} /* !flg_grd_in_hyb */
if(flg_grd_in_prs){
rcd=nco_inq_varid(in_id,plev_nm_in,&lev_id);
if((rcd=nco_inq_varid_flg(in_id,"PS",&ps_id)) == NC_NOERR){
/* Output file creation procedure discriminates between input surface pressure dimensioned as CAM/EAM vs. ECMWF */
flg_grd_hyb_cameam=True;
if(flg_grd_out_hyb && (ps_id_tpl == NC_MIN_INT)) (void)fprintf(stderr,"%s: INFO %s detects variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in pure-pressure input data file. PS will be copied directly from pure-pressure grid input dataset to, and used to construct the pressures of, the output hybrid-coordinate data file.\n",nco_prg_nm_get(),fnc_nm);
if(flg_grd_out_hyb && (ps_id_tpl != NC_MIN_INT)) (void)fprintf(stderr,"%s: INFO %s detects variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in both vertical-grid file and pure-pressure input data file. The vertical grid-file takes precedence. PS will be copied directly from vertical-grid file to, and used to construct the pressures of, the output hybrid-coordinate data file. PS in input pure-pressure file will be ignored.\n",nco_prg_nm_get(),fnc_nm);
}else{
if(flg_grd_out_hyb && (ps_id_tpl == NC_MIN_INT)){
(void)fprintf(stderr,"%s: ERROR %s does not find variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in pure-pressure input data file or in vertical grid-file for hybrid-pressure output. PS must be present in at least one of these files in order to construct the output hybrid-coordinate pressures.\nHINT: Append a valid PS to the input data file or vertical grid-file.\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* !ps_id_tpl */
} /* !ps_id */
} /* !flg_grd_in_prs */
if(flg_grd_in_dpt){
rcd=nco_inq_varid(in_id,"depth",&lev_id);
} /* !flg_grd_in_dpt */
const int ilev_id_in=ilev_id; /* [id] Interface pressure ID */
const int lev_id_in=lev_id; /* [id] Midpoint pressure ID */
const int ps_id_in=ps_id; /* [id] Surface pressure ID */
/* Identify all record-dimensions in input file */
rcd=nco_inq_unlimdims(in_id,&dmn_nbr_rec,(int *)NULL);
if(dmn_nbr_rec > 0){
dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int));
rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec);
} /* !dmn_nbr_rec */
if(flg_grd_in_hyb){
/* Get hybrid vertical information first */
rcd=nco_inq_varndims(in_id,ps_id,&dmn_nbr_in);
rcd=nco_inq_vardimid(in_id,hyai_id,&dmn_id_ilev_in);
if(flg_grd_hyb_cameam) rcd=nco_inq_vardimid(in_id,hyam_id,&dmn_id_lev_in);
if(flg_grd_hyb_ecmwf) rcd=nco_inq_vardimid(in_id,lev_id,&dmn_id_lev_in);
rcd=nco_inq_dimlen(in_id,dmn_id_ilev_in,&ilev_nbr_in);
rcd=nco_inq_dimlen(in_id,dmn_id_lev_in,&lev_nbr_in);
rcd=nco_inq_dimname(in_id,dmn_id_ilev_in,dmn_nm);
ilev_nm_in=strdup(dmn_nm);
rcd=nco_inq_dimname(in_id,dmn_id_lev_in,dmn_nm);
lev_nm_in=strdup(dmn_nm);
} /* !flg_grd_in_hyb */
if(flg_grd_in_prs){
/* Interrogate plev to obtain plev dimensions */
rcd=nco_inq_vardimid(in_id,lev_id,&dmn_id_lev_in);
rcd=nco_inq_dimlen(in_id,dmn_id_lev_in,&lev_nbr_in);
rcd=nco_inq_dimname(in_id,dmn_id_lev_in,dmn_nm);
lev_nm_in=strdup(dmn_nm);
/* Define horizontal grid if no PS is provided (i.e., pure-pressure to pure-pressure interpolation) */
if(!flg_grd_out_hyb){
/* Problem: What is horizontal grid size of pressure grid file?
Algorithm:
Examine first multi-dimensional variable that includes plev dimension
Assume horizontal dimensions vary more rapidly than (i.e., follow) plev
Compute horizontal grid size accordingly
Set output horizontal size to input horizontal size */
int var_nbr; /* [nbr] Number of variables in file */
int var_idx; /* [idx] Index over variables in file */
rcd=nco_inq(in_id,&dmn_nbr_in,&var_nbr,(int *)NULL,(int *)NULL);
dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int));
dmn_cnt_in=(long *)nco_malloc(dmn_nbr_in*sizeof(long));
for(var_idx=0;var_idx<var_nbr;var_idx++){
rcd=nco_inq_varndims(in_id,var_idx,&dmn_nbr_in);
rcd=nco_inq_vardimid(in_id,var_idx,dmn_ids_in);
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++)
if(dmn_ids_in[dmn_idx] == dmn_id_lev_in)
break;
/* Does current variable have lev dimension? */
if(dmn_idx < dmn_nbr_in){
/* Yes. Do any dimensions vary more rapidly than lev? */
if(dmn_idx < dmn_nbr_in-1){
/* Yes. Assume remaining dimension are horizontal spatial dimensions */
char var_nm[NC_MAX_NAME+1L];
(void)nc_inq_varname(in_id,var_idx,var_nm);
for(int dmn_idx_hrz=dmn_idx+1;dmn_idx_hrz<dmn_nbr_in;dmn_idx_hrz++){
rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx_hrz],dmn_cnt_in+dmn_idx_hrz);
grd_sz_in*=dmn_cnt_in[dmn_idx_hrz];
} /* !dmn_idx_hrz */
break;
} /* !dmn_idx */
} /* !dmn_idx */
} /* !var_idx */
assert(var_idx != var_nbr);
grd_sz_out=grd_sz_in;
} /* !flg_grd_out_hyb */
} /* !flg_grd_in_prs */
double *hyai_in=NULL; /* [frc] Hybrid A coefficient at layer interfaces on input grid */
double *hyam_in=NULL; /* [frc] Hybrid A coefficient at layer midpoints on input grid */
double *hybi_in=NULL; /* [frc] Hybrid B coefficient at layer interfaces on input grid */
double *hybm_in=NULL; /* [frc] Hybrid B coefficient at layer midpoints on input grid */
double *lev_in=NULL; /* [Pa] Air pressure on input grid */
double *prs_mdp_in=NULL; /* [Pa] Midpoint pressure on input grid */
double *prs_ntf_in=NULL; /* [Pa] Interface pressure on input grid */
double *ps_in=NULL; /* [Pa] Surface pressure on input grid */
double p0_in; /* [Pa] Reference pressure on input grid */
if(flg_grd_in_hyb){
hyai_in=(double *)nco_malloc(ilev_nbr_in*nco_typ_lng(var_typ_rgr));
hyam_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr));
hybi_in=(double *)nco_malloc(ilev_nbr_in*nco_typ_lng(var_typ_rgr));
hybm_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr));
rcd=nco_get_var(in_id,hyai_id,hyai_in,crd_typ_out);
rcd=nco_get_var(in_id,hyam_id,hyam_in,crd_typ_out);
rcd=nco_get_var(in_id,hybi_id,hybi_in,crd_typ_out);
rcd=nco_get_var(in_id,hybm_id,hybm_in,crd_typ_out);
if(flg_grd_hyb_cameam) rcd=nco_get_var(in_id,p0_id,&p0_in,crd_typ_out);
/* ECMWF distributes IFS forecasts with lnsp = log(surface pressure) */
if(flg_grd_hyb_ecmwf){
/* Decompose ECMWF hya? convention into CAM/EAM-like product of P0 and hya? */
p0_in=100000.0;
/* NB: hyai_in has ilev_nbr_in (= lev_nbr_in+1) elements, hyam_in has lev_nbr_in elements
   Scale each array over its own extent lest last interface coefficient remain unscaled */
for(size_t idx=0;idx<ilev_nbr_in;idx++) hyai_in[idx]/=p0_in;
for(size_t idx=0;idx<lev_nbr_in;idx++) hyam_in[idx]/=p0_in;
} /* flg_grd_hyb_ecmwf */
} /* !flg_grd_in_hyb */
if(flg_grd_in_prs){
lev_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr));
rcd=nco_get_var(in_id,lev_id,lev_in,crd_typ_out);
} /* !flg_grd_in_prs */
/* Always obtain surface pressure if input or output grid is hybrid */
if(flg_grd_in_hyb || flg_grd_out_hyb){
/* Copy horizontal grid information from input file
LHS variables were set above if PS is in template file */
if(ps_id_tpl == NC_MIN_INT){
/* NB: dmn_nbr_in/out in this block refer only to horizontal dimensions necessary to define PS */
rcd=nco_inq_varndims(in_id,ps_id,&dmn_nbr_in); /* This is harmlessly repeated for hybrid input files */
dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int));
dmn_cnt_in=(long *)nco_malloc((dmn_nbr_in+1)*sizeof(long));
if(!dmn_srt) dmn_srt=(long *)nco_malloc((dmn_nbr_in+1)*sizeof(long)); /* NB: Only allocate dmn_srt once */
rcd=nco_inq_vardimid(in_id,ps_id,dmn_ids_in);
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_in+dmn_idx);
/* 20190330: Allow possibility that PS has time dimension > 1
We want horizontal not temporal dimensions to contribute to grd_sz
Temporal dimension is usually unlimited
Only multiply grd_sz by fixed (non-unlimited) dimension sizes
Corner-case exception when PS spatial dimension on unstructured grid is unlimited */
for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++)
if(dmn_ids_in[dmn_idx] == dmn_ids_rec[rec_idx])
break;
if(rec_idx == dmn_nbr_rec || dmn_nbr_in == 1) grd_sz_in*=dmn_cnt_in[dmn_idx];
if(rec_idx != dmn_nbr_rec && dmn_nbr_in > 1 && dmn_cnt_in[dmn_idx] > 1L){
dmn_id_tm_in=dmn_ids_in[dmn_idx];
dmn_idx_tm_in=dmn_idx;
tm_nbr_in=dmn_cnt_in[dmn_idx_tm_in];
if(tm_nbr_in > 1L) flg_vrt_tm=True;
} /* tm_nbr_in > 1 */
dmn_srt[dmn_idx]=0L;
} /* !dmn_idx */
/* Given all input PS information, define output PS information */
dmn_nbr_ps=dmn_nbr_out=dmn_nbr_in;
dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int));
dmn_cnt_out=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long));
/* fxm: next line works for hyb_in and is buggy for prs_in */
memcpy(dmn_ids_out,dmn_ids_in,dmn_nbr_in*sizeof(int));
memcpy(dmn_cnt_out,dmn_cnt_in,dmn_nbr_in*sizeof(long));
grd_sz_out=grd_sz_in;
tm_nbr_out=tm_nbr_in;
}else{ /* !ps_id_tpl */
/* 20200825:
We have already defined grd_sz_out if PS is in template file
We have already defined grd_sz_in and grd_sz_out := grd_sz_in when PS not in template file
We have already defined grd_sz_in if input file is pure-pressure
However, we have not yet defined grd_sz_in if input file is hybrid
Expectation is that grd_sz_in (from input file) = grd_sz_out (from template file)
An independent check on this would examine dimension sizes in input file
Such a check would immediately flag horizontal mismatches between vertical file and input file
The check could not rely on PS being present in input file
The check could/should examine the first horizontal variable in input file
This would require a lot of code, so we just assume it is true */
grd_sz_in=grd_sz_out;
} /* !ps_id_tpl */
/* Timestep sequencing
NB: tm_nbr_??? variables count timesteps in vertical grid definitions
These are not necessarily the same as the number of timesteps in either file
Time-invariant hybrid or pure-pressure coordinates are valid vertical grids for timeseries
Usually hybrid grids have as many timesteps in the grids as in the timeseries
Usually pressure grids are time-invariant (as of 20190511 time-varying pure pressure grids are still not supported)
This implementation interpolates timeseries to/from time-invariant vertical grids in one OpenMP call! */
if(tm_nbr_in > 1L || tm_nbr_out > 1L){
if(tm_nbr_in > tm_nbr_out) assert((float)tm_nbr_in/(float)tm_nbr_out == tm_nbr_in/tm_nbr_out); else assert((float)tm_nbr_out/(float)tm_nbr_in == tm_nbr_out/tm_nbr_in);
} /* !tm_nbr_in */
tm_nbr=tm_nbr_in > tm_nbr_out ? tm_nbr_in : tm_nbr_out;
/* Sanity checks */
if(grd_sz_in != grd_sz_out || tm_nbr_in != tm_nbr_out) (void)fprintf(stdout,"%s: ERROR %s reports that temporal or horizontal spatial dimensions differ: grd_sz_in = %ld != %ld = grd_sz_out, and/or tm_nbr_in = %ld != %ld = tm_nbr_out\n",nco_prg_nm_get(),fnc_nm,grd_sz_in,grd_sz_out,tm_nbr_in,tm_nbr_out);
assert(grd_sz_in == grd_sz_out);
assert(tm_nbr_in == tm_nbr_out);
ps_in=(double *)nco_malloc_dbg(tm_nbr_in*grd_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() ps_in value buffer");
/* Surface pressure comes from either hybrid vertical grid-files, hybrid data files, or pressure data files that provide surface pressure */
if(flg_grd_in_hyb || (flg_grd_in_prs && ps_id_tpl == NC_MIN_INT)) rcd=nco_get_var(in_id,ps_id,ps_in,crd_typ_out);
/* ECMWF distributes IFS forecasts with lnsp = log(surface pressure) */
if(flg_grd_hyb_ecmwf){
/* Convert ECMWF-provided log(surface_pressure) to surface_pressure */
const size_t ps_sz_in=tm_nbr_in*grd_sz_in; /* [nbr] Number of elements in ps_in */
for(size_t idx=0;idx<ps_sz_in;idx++) ps_in[idx]=exp(ps_in[idx]);
} /* flg_grd_hyb_ecmwf */
/* Finally have enough information to allocate output pressure grid */
ps_out=(double *)nco_malloc_dbg(tm_nbr_out*grd_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() ps_out value buffer");
/* Get PS from output horizontal grid, if available, otherwise copy from input horizontal grid */
if(ps_id_tpl != NC_MIN_INT){
rcd=nco_get_var(tpl_id,ps_id_tpl,ps_out,crd_typ_out); /* NB: Here we read from tpl_id one last time */
}else{
memcpy(ps_out,ps_in,tm_nbr_in*grd_sz_in*nco_typ_lng(var_typ_rgr));
} /* !ps_id_tpl */
} /* ! */
/* Compare input and output surface pressure fields to determine whether subterranean extrapolation required */
nco_bool flg_add_msv_att; /* [flg] Extrapolation requires _FillValue */
flg_add_msv_att=False;
/* Extrapolation type xtr_fll_msv may cause need to create _FillValue attributes */
if(xtr_mth == nco_xtr_fll_msv){
const size_t ps_sz=tm_nbr*grd_sz_in; // [nbr] Size of surface-pressure field
double *prs_max_in=NULL; /* [Pa] Maximum midpoint pressure on input grid */
double *prs_max_out=NULL; /* [Pa] Maximum midpoint pressure on output grid */
double *prs_min_in=NULL; /* [Pa] Minimum midpoint pressure on input grid */
double *prs_min_out=NULL; /* [Pa] Minimum midpoint pressure on output grid */
long idx_lev_max; // [idx] Index of midpoint level with greatest pressure
long idx_lev_min; // [idx] Index of midpoint level with lowest pressure
size_t idx; // [idx] Counting index
prs_max_in=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_max_in value buffer");
prs_max_out=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_max_out value buffer");
prs_min_in=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_min_in value buffer");
prs_min_out=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_min_out value buffer");
if(flg_grd_in_hyb){
// fxm: assumes hybrid grid has least/greatest pressure at top/bottom level
idx_lev_max=lev_nbr_in-1;
idx_lev_min=0L;
for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){
idx_fst=tm_idx*grd_sz_in;
for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++){
prs_max_in[grd_idx+idx_fst]=p0_in*hyam_in[idx_lev_max]+ps_in[idx_fst+grd_idx]*hybm_in[idx_lev_max];
prs_min_in[grd_idx+idx_fst]=p0_in*hyam_in[idx_lev_min]+ps_in[idx_fst+grd_idx]*hybm_in[idx_lev_min];
} /* !grd_idx */
} /* !tm_idx */
} /* !flg_grd_in_hyb */
if(flg_grd_out_hyb){
// fxm: assumes hybrid grid has least/greatest pressure at top/bottom level
idx_lev_max=lev_nbr_out-1;
idx_lev_min=0L;
for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){
idx_fst=tm_idx*grd_sz_out;
for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++){
prs_max_out[grd_idx+idx_fst]=p0_out*hyam_out[idx_lev_max]+ps_out[idx_fst+grd_idx]*hybm_out[idx_lev_max];
prs_min_out[grd_idx+idx_fst]=p0_out*hyam_out[idx_lev_min]+ps_out[idx_fst+grd_idx]*hybm_out[idx_lev_min];
} /* !grd_idx */
} /* !tm_idx */
} /* !flg_grd_out_hyb */
if(flg_grd_in_prs){
  /* Pure-pressure input grid: levels are horizontally uniform and time-invariant,
     so the per-point extrema are simply the endpoint values of the level array.
     Monotonicity direction is inferred from the first two levels. */
  double lev_in_max; /* [Pa] Greatest midpoint pressure on input grid */
  double lev_in_min; /* [Pa] Least midpoint pressure on input grid */
  if(lev_in[0] < lev_in[1]){
    /* Monotonically increasing coordinate: first level is minimum, last is maximum */
    lev_in_max=lev_in[lev_nbr_in-1];
    lev_in_min=lev_in[0];
  }else{
    /* Monotonically decreasing coordinate: first level is maximum, last is minimum
       Bugfix: previous code assigned lev_in_max twice in this branch and left
       lev_in_min uninitialized (cf. correct output-grid logic below) */
    lev_in_max=lev_in[0];
    lev_in_min=lev_in[lev_nbr_in-1];
  } /* !monotonicity */
  for(size_t idx_in=0;idx_in<ps_sz;idx_in++) prs_max_in[idx_in]=lev_in_max;
  for(size_t idx_in=0;idx_in<ps_sz;idx_in++) prs_min_in[idx_in]=lev_in_min;
} /* !flg_grd_in_prs */
if(flg_grd_out_prs){
  /* Pure-pressure output grid: pressure extrema are the endpoints of the level list,
     uniform across the horizontal grid; direction inferred from first two levels */
  const double prs_top=(lev_out[0] < lev_out[1]) ? lev_out[0] : lev_out[lev_nbr_out-1]; /* [Pa] Least output midpoint pressure */
  const double prs_btm=(lev_out[0] < lev_out[1]) ? lev_out[lev_nbr_out-1] : lev_out[0]; /* [Pa] Greatest output midpoint pressure */
  for(size_t idx_out=0;idx_out<ps_sz;idx_out++){
    prs_max_out[idx_out]=prs_btm;
    prs_min_out[idx_out]=prs_top;
  } /* !idx_out */
} /* !flg_grd_out_prs */
/* Extrapolation is required iff any output point exceeds the input pressure range
   NB: both scans rely on idx retaining its value after an early break */
for(idx=0;idx<ps_sz;idx++)
  if(prs_max_out[idx] > prs_max_in[idx]) break;
if(idx < ps_sz) flg_add_msv_att=True; /* Some output level lies below (deeper than) input range */
for(idx=0;idx<ps_sz;idx++)
  if(prs_min_out[idx] < prs_min_in[idx]) break;
if(idx < ps_sz) flg_add_msv_att=True; /* Some output level lies above (higher than) input range */
if(flg_add_msv_att && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports at least one point in at least one output level requires extrapolation (not interpolation). Will ensure that all interpolated fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm);
/* Release extrema workspace */
if(prs_max_in) prs_max_in=(double *)nco_free(prs_max_in);
if(prs_max_out) prs_max_out=(double *)nco_free(prs_max_out);
if(prs_min_in) prs_min_in=(double *)nco_free(prs_min_in);
if(prs_min_out) prs_min_out=(double *)nco_free(prs_min_out);
} /* !xtr_mth */
/* Lay-out regridded file */
//(void)fprintf(stdout,"%s: DEBUG quark1 dmn_nbr_out = %d, dmn_nbr_ps = %d\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps);
/* Use explicitly specified output names, if any, otherwise use template names (either explicitly specified or discovered by fuzzing) */
if(rgr->lev_nm_out) lev_nm_out=rgr->lev_nm_out;
if(rgr->ilev_nm_out){
  if(flg_grd_out_hyb) ilev_nm_out=rgr->ilev_nm_out;
  /* Pure-pressure output has one vertical dimension: a user-specified interface name renames the midpoint dimension */
  if(flg_grd_out_prs) lev_nm_out=rgr->ilev_nm_out;
} /* !ilev_nm_out */
if(flg_grd_out_prs){
  /* Unless user explicitly specifies output name, use same name as input */
  if(!rgr->lev_nm_out) lev_nm_out=(char *)strdup(plev_nm_in);
  /* Hybrid-sigma/pressure interface variables, if any, must also be output to pure-pressure files on lev grid */
  ilev_nm_out=(char *)strdup(lev_nm_out);
} /* !flg_grd_out_prs */
/* Define new vertical dimensions before all else */
if(flg_grd_out_hyb){
  rcd=nco_def_dim(out_id,ilev_nm_out,ilev_nbr_out,&dmn_id_ilev_out);
  rcd=nco_def_dim(out_id,lev_nm_out,lev_nbr_out,&dmn_id_lev_out);
  /* Horizontal dimensions necessary to define PS variable */
  for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){
    if(ps_id_tpl != NC_MIN_INT){
      /* Template file supplies PS: take dimension names from template */
      rcd=nco_inq_dimname(tpl_id,dmn_ids_out[dmn_idx],dmn_nm);
    }else{
      /* No template PS: mirror dimension names and sizes from input file */
      rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm);
      rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_out+dmn_idx);
    } /* !ps_id_tpl */
    if(flg_grd_hyb_cameam) rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx);
    /* 20190602: ECMWF IFS PS variable has degenerate vertical dimension (lev_2). Avoid re-definition */
    if(flg_grd_hyb_ecmwf)
      if(strcmp(dmn_nm,ilev_nm_out))
        if(strcmp(dmn_nm,lev_nm_out))
          rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx);
  } /* !dmn_idx */
} /* !flg_grd_out_hyb */
if(flg_grd_out_prs){
  /* Pure-pressure output needs only the single midpoint dimension */
  rcd=nco_def_dim(out_id,lev_nm_out,lev_nbr_out,&dmn_id_lev_out);
} /* !flg_grd_out_prs */
/* Do not extract grid variables (that are also extensive variables) like ilev, lev, hyai, hyam, hybi, hybm */
/* Exception list source:
   CAM: hyai, hyam, hybi, hybm, ilev, lev, P0, PS
   EAM: hyai, hyam, hybi, hybm, ilev, lev, P0, PS
   ECMWF: hyai, hyam, hybi, hybm, lev, lnsp
   NCEP: plev */
const int var_xcl_lst_nbr=10; /* [nbr] Number of objects on exclusion list */
const char *var_xcl_lst[]={"/hyai","/hyam","/hybi","/hybm","/ilev","/lev","/P0","/plev","/PS","/lnsp"}; /* Full (root-group) paths of excluded variables */
int var_cpy_nbr=0; /* [nbr] Number of copied variables */
int var_rgr_nbr=0; /* [nbr] Number of regridded variables */
int var_xcl_nbr=0; /* [nbr] Number of deleted variables */
int var_crt_nbr=0; /* [nbr] Number of created variables */
long idx; /* [idx] Generic index */
unsigned int idx_tbl; /* [idx] Counter for traversal table */
const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */
/* Turn off extraction flag for every exclusion-list variable found in traversal table */
for(idx=0;idx<var_xcl_lst_nbr;idx++){
  /* Linear search; early break leaves idx_tbl pointing at the match */
  for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++)
    if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,var_xcl_lst[idx])) break;
  if(idx_tbl < trv_nbr){
    if(trv_tbl->lst[idx_tbl].flg_xtr){
      if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll);
      var_xcl_nbr++;
    } /* endif */
    trv_tbl->lst[idx_tbl].flg_xtr=False;
  } /* !idx_tbl */
} /* !idx */
/* 20191001: Do not automatically define plev_nm_in in pressure-grid output files
   The variable named lev_nm_out in the input data file is always defined in the output file
   So if plev_nm_in == lev_nm_out it will be defined anyway */
if(flg_grd_in_prs && flg_grd_out_prs && strcmp(plev_nm_in,lev_nm_out)){
  /* NB: matches on short name (nm), unlike full-path matches above */
  for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++)
    if(!strcmp(trv_tbl->lst[idx_tbl].nm,plev_nm_in)) break;
  if(idx_tbl < trv_nbr){
    if(trv_tbl->lst[idx_tbl].flg_xtr){
      if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll);
      var_xcl_nbr++;
    } /* endif */
    trv_tbl->lst[idx_tbl].flg_xtr=False;
  } /* !idx_tbl */
} /* !idx */
/* Workspace for defining output variables */
char *var_nm; /* [sng] Variable name */
int *dmn_id_in=NULL; /* [id] Dimension IDs */
int *dmn_id_out=NULL; /* [id] Dimension IDs */
int var_id_in; /* [id] Variable ID */
int var_id_out; /* [id] Variable ID */
nc_type var_typ_out; /* [enm] Variable type to write to disk */
nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */
int shuffle; /* [flg] Turn-on shuffle filter */
int deflate; /* [flg] Turn-on deflate filter */
deflate=(int)True;
shuffle=NC_SHUFFLE;
dfl_lvl=rgr->dfl_lvl; /* User-requested deflate level */
fl_out_fmt=rgr->fl_out_fmt; /* Output file format */
/* Define new coordinates and grid variables in regridded file */
const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables (scalars) */
const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */
//const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */
//const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */
//const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */
/* Define hybrid-coordinate grid variables (coefficients, coordinates, P0, PS) in output file */
if(flg_grd_out_hyb){
  rcd+=nco_def_var(out_id,"hyai",crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&hyai_id);
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hyai_id,shuffle,deflate,dfl_lvl);
  var_crt_nbr++;
  rcd+=nco_def_var(out_id,"hyam",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&hyam_id);
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hyam_id,shuffle,deflate,dfl_lvl);
  var_crt_nbr++;
  rcd+=nco_def_var(out_id,"hybi",crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&hybi_id);
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hybi_id,shuffle,deflate,dfl_lvl);
  var_crt_nbr++;
  rcd+=nco_def_var(out_id,"hybm",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&hybm_id);
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hybm_id,shuffle,deflate,dfl_lvl);
  var_crt_nbr++;
  rcd+=nco_def_var(out_id,ilev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&ilev_id);
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ilev_id,shuffle,deflate,dfl_lvl);
  var_crt_nbr++;
  rcd+=nco_def_var(out_id,lev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&lev_id);
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lev_id,shuffle,deflate,dfl_lvl);
  var_crt_nbr++;
  rcd+=nco_def_var(out_id,"P0",crd_typ_out,dmn_nbr_0D,(int *)NULL,&p0_id);
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,p0_id,shuffle,deflate,dfl_lvl);
  var_crt_nbr++;
  // for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){
  //   rcd=nco_inq_dimname(out_id,dmn_ids_out[dmn_idx],dmn_nm);
  //   (void)fprintf(stdout,"%s: DEBUG quark5 dmn_nbr_out = %d, dmn_nbr_ps = %d, dmn_idx = %d, dmn_ids_out[%d] = %d, dmn_nm = %s\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps,dmn_idx,dmn_idx,dmn_ids_out[dmn_idx],dmn_nm);
  // } /* !dmn_idx */
  /* CAM/EAM PS keeps all horizontal (and record) dimensions */
  if(flg_grd_hyb_cameam) rcd+=nco_def_var(out_id,"PS",crd_typ_out,dmn_nbr_ps,dmn_ids_out,&ps_id);
  if(flg_grd_hyb_ecmwf){
    /* Remove degenerate ECMWF vertical dimension so that output PS has dmn_nbr_ps-1 not dmn_nbr_ps dimensions */
    int dmn_nbr_out_ecmwf=0;
    for(dmn_idx=0;dmn_idx<dmn_nbr_ps;dmn_idx++){
      rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm);
      /* Keep only dimensions that are neither vertical nor the degenerate lev_2 */
      if(strcmp(dmn_nm,ilev_nm_out) && strcmp(dmn_nm,lev_nm_out) && strcmp(dmn_nm,"lev_2"))
        rcd=nco_inq_dimid(out_id,dmn_nm,dmn_ids_out+dmn_nbr_out_ecmwf++);
    } /* !dmn_idx */
    rcd+=nco_def_var(out_id,"PS",crd_typ_out,dmn_nbr_out_ecmwf,dmn_ids_out,&ps_id);
  } /* !flg_grd_hyb_ecmwf */
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ps_id,shuffle,deflate,dfl_lvl);
  var_crt_nbr++;
  /* Copy attributes from template where available, else fall back to input file */
  (void)nco_att_cpy(tpl_id,out_id,hyai_id_tpl,hyai_id,PCK_ATT_CPY);
  (void)nco_att_cpy(tpl_id,out_id,hyam_id_tpl,hyam_id,PCK_ATT_CPY);
  (void)nco_att_cpy(tpl_id,out_id,hybi_id_tpl,hybi_id,PCK_ATT_CPY);
  (void)nco_att_cpy(tpl_id,out_id,hybm_id_tpl,hybm_id,PCK_ATT_CPY);
  if(p0_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,p0_id_tpl,p0_id,PCK_ATT_CPY); /* p0 not expected to be in ECMWF grids */
  if(ilev_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,ilev_id_tpl,ilev_id,PCK_ATT_CPY); else if(ilev_id_in != NC_MIN_INT) (void)nco_att_cpy(in_id,out_id,ilev_id_in,ilev_id,PCK_ATT_CPY);
  if(lev_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,lev_id_tpl,lev_id,PCK_ATT_CPY); else if(lev_id_in != NC_MIN_INT) (void)nco_att_cpy(in_id,out_id,lev_id_in,lev_id,PCK_ATT_CPY);
  if(ps_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,ps_id_tpl,ps_id,PCK_ATT_CPY); else (void)nco_att_cpy(in_id,out_id,ps_id_in,ps_id,PCK_ATT_CPY);
} /* !flg_grd_out_hyb */
if(flg_grd_out_prs){
  /* Pure-pressure output defines only the single level coordinate */
  rcd+=nco_def_var(out_id,lev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&lev_id);
  if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lev_id,shuffle,deflate,dfl_lvl);
  var_crt_nbr++;
  (void)nco_att_cpy(tpl_id,out_id,lev_id_tpl,lev_id,PCK_ATT_CPY);
  /* Alias interface dimension to midpoint dimension so later interface-variable logic works unchanged */
  dmn_id_ilev_out=dmn_id_lev_out;
} /* !flg_grd_out_prs */
/* No further access to template file, close it */
nco_close(tpl_id);
/* Remove local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_tpl);
char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */
nco_bool has_ilev; /* [flg] Contains interface level dimension */
nco_bool has_lev; /* [flg] Contains midpoint level dimension */
nco_bool has_tm; /* [flg] Contains time dimension */
nco_bool need_prs_ntf=False; /* [flg] At least one variable to regrid is on interface levels */
nco_bool need_prs_mdp=False; /* [flg] At least one variable to regrid is on midpoint levels */
trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */
/* Define regridding flag for each variable */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
  trv=trv_tbl->lst[idx_tbl];
  if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
    dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn;
    has_ilev=False;
    has_lev=False;
    for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
      /* Pre-determine flags necessary during next loop */
      dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm;
      /* fxm: Generalize to include any variable containing coordinates with "standard_name" = "atmosphere_hybrid_sigma_pressure_coordinate" */
      if(!has_ilev && ilev_nm_in) has_ilev=!strcmp(dmn_nm_cp,ilev_nm_in);
      if(!has_lev) has_lev=!strcmp(dmn_nm_cp,lev_nm_in);
    } /* end loop over dimensions */
    /* Regrid variables that contain either vertical dimension */
    if(has_ilev || has_lev){
      trv_tbl->lst[idx_tbl].flg_rgr=True;
      var_rgr_nbr++;
      if(has_ilev) need_prs_ntf=True;
      if(has_lev) need_prs_mdp=True;
    } /* endif */
    /* A variable cannot be on both interface and midpoint grids simultaneously */
    assert(!(has_ilev && has_lev));
    /* Copy all variables that are not regridded or omitted */
    if(!trv_tbl->lst[idx_tbl].flg_rgr) var_cpy_nbr++;
  } /* end nco_obj_typ_var */
} /* end idx_tbl */
if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit interpolation criteria. The vertical interpolator expects something to interpolate, and variables not interpolated are copied straight to output. HINT: If the name(s) of the input vertical grid dimensions (e.g., ilev and lev) do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"ilev\", \"lev\", and/or \"plev\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#regrid. For hybrid-pressure coordinate grids, ensure that the \"ilev\" and \"lev\" variable names are known with, e.g., \"ncks --rgr ilev_nm=interface_level --rgr lev_nm=midpoint_level\" or \"ncremap -R '--rgr ilev=interface_level --rgr lev=midpoint_level'\". For pure pressure grids, ensure the \"plev\" coordinate name is defined with, e.g., \"ncks --rgr plev_nm=pressure_level\" or \"ncremap -R '--rgr plev=pressure_level'\".\n",nco_prg_nm_get(),fnc_nm);
if(nco_dbg_lvl_get() >= nco_dbg_fl){
  /* Report per-variable interpolate/copy decision */
  for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
    trv=trv_tbl->lst[idx_tbl];
    if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Interpolate %s? %s\n",trv.nm,trv.flg_rgr ? "Yes" : "No");
  } /* end idx_tbl */
} /* end dbg */
/* Pre-allocate dimension ID and cnt/srt space */
int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */
rcd+=nco_inq_ndims(in_id,&dmn_nbr_max); /* Total dimensions in input file bounds any variable's rank */
dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int));
dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int));
/* Re-allocate any buffers possibly sized earlier for a different purpose */
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out);
dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
/* Machinery to create _FillValue attributes when extrapolation may produce missing values */
aed_sct aed_mtd_fll_val; /* [sct] Attribute-edit structure for _FillValue */
char *att_nm_fll_val=strdup("_FillValue");
int flg_pck; /* [flg] Variable is packed on disk */
nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */
double mss_val_dbl;
double mss_val_cmp_dbl; /* Missing value for comparison to double precision values */
float mss_val_flt;
if(flg_add_msv_att){
  aed_mtd_fll_val.att_nm=att_nm_fll_val;
  aed_mtd_fll_val.mode=aed_create;
  aed_mtd_fll_val.sz=1L;
  /* Default missing values are netCDF fill values for each type */
  mss_val_dbl=NC_FILL_DOUBLE;
  mss_val_flt=NC_FILL_FLOAT;
} /* !flg_add_msv_att */
/* Define interpolated and copied variables in output file */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
  trv=trv_tbl->lst[idx_tbl];
  if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
    var_nm=trv.nm;
    /* Preserve input type in output type */
    var_typ_out=trv.var_typ;
    dmn_nbr_in=trv.nbr_dmn;
    dmn_nbr_out=trv.nbr_dmn;
    rcd=nco_inq_varid(in_id,var_nm,&var_id_in);
    /* _flg variant returns error (rather than aborting) when variable is absent */
    rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out);
    /* If variable has not been defined, define it */
    if(rcd != NC_NOERR){
      if(trv.flg_rgr){
        /* Interpolate */
        rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in);
        rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck);
        if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports variable \"%s\" is packed so results unpredictable. HINT: If regridded values seems weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm);
        for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
          rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm);
          if(ilev_nm_in && !strcmp(dmn_nm,ilev_nm_in)){
            /* Change ilev dimension */
            dmn_id_out[dmn_idx]=dmn_id_ilev_out;
            dmn_cnt_out[dmn_idx]=ilev_nbr_out;
          }else if(!strcmp(dmn_nm,lev_nm_in)){
            /* Change lev dimension */
            dmn_id_out[dmn_idx]=dmn_id_lev_out;
            dmn_cnt_out[dmn_idx]=lev_nbr_out;
          }else{
            /* Dimensions ilev/lev_nm_in have already been defined as ilev/lev_nm_out, replicate all other dimensions */
            rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx);
          } /* !ilev */
          /* rcd != NC_NOERR here means dimension not yet in output file: define it */
          if(rcd != NC_NOERR){
            rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_out+dmn_idx);
            /* Check-for and, if found, retain record dimension property */
            for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++)
              if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx])
                dmn_cnt_out[dmn_idx]=NC_UNLIMITED;
            rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_id_out+dmn_idx);
          } /* !rcd */
        } /* !dmn_idx */
      }else{ /* !flg_rgr */
        /* Replicate non-interpolated variables */
        rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in);
        for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
          rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm);
          rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx);
          if(rcd != NC_NOERR){
            rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_out+dmn_idx);
            /* Check-for and, if found, retain record dimension property */
            for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++)
              if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx])
                dmn_cnt_out[dmn_idx]=NC_UNLIMITED;
            rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_id_out+dmn_idx);
          } /* !rcd */
        } /* !dmn_idx */
      } /* !flg_rgr */
      rcd=nco_def_var(out_id,var_nm,var_typ_out,dmn_nbr_out,dmn_id_out,&var_id_out);
      /* Duplicate netCDF4 settings when possible */
      if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){
        /* Deflation */
        if(dmn_nbr_out > 0){
          int dfl_lvl_in; /* [enm] Deflate level [0..9] */
          rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in);
          /* Copy original deflation settings */
          if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in);
          /* Overwrite HDF Lempel-Ziv compression level, if requested */
          if(dfl_lvl == 0) deflate=(int)False; else deflate=(int)True;
          /* Turn-off shuffle when uncompressing otherwise chunking requests may fail */
          if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE;
          /* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */
          if(dfl_lvl >= 0) shuffle=NC_SHUFFLE;
          if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl);
        } /* !dmn_nbr_out */
      } /* !NC_FORMAT_NETCDF4 */
      (void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY);
      /* Variables with subterranean levels and missing-value extrapolation must have _FillValue attribute */
      if(flg_add_msv_att && trv.flg_rgr){
        has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl);
        if(!has_mss_val){
          nco_bool flg_att_chg; /* [flg] _FillValue attribute was written */
          aed_mtd_fll_val.var_nm=var_nm;
          aed_mtd_fll_val.id=var_id_out;
          aed_mtd_fll_val.type=var_typ_out;
          /* NOTE(review): only NC_FLOAT/NC_DOUBLE set the value pointer; other types presumably never reach here — confirm */
          if(var_typ_out == NC_FLOAT) aed_mtd_fll_val.val.fp=&mss_val_flt;
          else if(var_typ_out == NC_DOUBLE) aed_mtd_fll_val.val.dp=&mss_val_dbl;
          flg_att_chg=nco_aed_prc(out_id,var_id_out,aed_mtd_fll_val);
          if(!flg_att_chg && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: WARNING %s reports unsuccessful attempt to create _FillValue attribute for variable %s\n",nco_prg_nm_get(),fnc_nm,var_nm);
        } /* !has_mss_val */
      } /* !flg_add_msv_att */
    } /* !rcd */
  } /* !var */
} /* !idx_tbl */
/* Free pre-allocated array space */
if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in);
if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out);
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out);
if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Begin data mode */
(void)nco_enddef(out_id);
/* Copy all grid variables */
if(flg_grd_out_hyb){
  /* Write hybrid coefficients, coordinates, reference and surface pressure */
  (void)nco_put_var(out_id,hyai_id,hyai_out,crd_typ_out);
  (void)nco_put_var(out_id,hyam_id,hyam_out,crd_typ_out);
  (void)nco_put_var(out_id,hybi_id,hybi_out,crd_typ_out);
  (void)nco_put_var(out_id,hybm_id,hybm_out,crd_typ_out);
  (void)nco_put_var(out_id,ilev_id,ilev_out,crd_typ_out);
  (void)nco_put_var(out_id,lev_id,lev_out,crd_typ_out);
  (void)nco_put_var(out_id,p0_id,&p0_out,crd_typ_out);
  (void)nco_put_var(out_id,ps_id,ps_out,crd_typ_out);
} /* !flg_grd_out_hyb */
if(flg_grd_out_prs){
  (void)nco_put_var(out_id,lev_id,lev_out,crd_typ_out);
} /* !flg_grd_out_prs */
nco_bool flg_ntp_log=True; /* [flg] Interpolate in log(vertical_coordinate) */
if(ntp_mth == nco_ntp_lnr) flg_ntp_log=False; /* Linear interpolation mode disables log transform */
size_t idx_in; /* [idx] Index into 3D input variables */
size_t idx_out; /* [idx] Index into 3D output variables */
size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */
size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */
/* Interpolate or copy variable values */
double *var_val_dbl_in=NULL; /* [frc] Input variable data (read in regrid precision) */
double *var_val_dbl_out=NULL; /* [frc] Output (interpolated) variable data */
double *prs_ntp_in; /* [Pa] Interpolated pressure array on input grid */
double *prs_ntp_out; /* [Pa] Interpolated pressure array on output grid */
int lvl_idx_in; /* [idx] Level index on input grid */
int lvl_idx_out; /* [idx] Level index on output grid */
int lvl_nbr_in; /* [nbr] Number of levels for current interpolated variable on input grid */
int lvl_nbr_out; /* [nbr] Number of levels for current interpolated variable on output grid */
int thr_idx; /* [idx] Thread index */
size_t grd_nbr=grd_sz_in; /* [nbr] Horizontal grid size */
size_t idx_dbg=rgr->idx_dbg; /* [idx] Gridpoint to debug-print */
/* Using naked stdin/stdout/stderr in parallel region generates warning
   Copy appropriate filehandle to variable scoped as shared in parallel clause */
FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */
/* Repeating above documentation for the forgetful:
   NB: tm_nbr is max(timesteps) in vertical grid definitions, not number of records in either file
   This implementation interpolates timeseries to/from time-invariant vertical grids in one OpenMP call! */
/* NB: loop body continues beyond this view; only the per-timestep pressure
   reconstruction portion is documented here */
for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){
  /* Index-offset to current surface pressure timeslice */
  idx_fst=tm_idx*grd_sz_in;
  if(need_prs_mdp){
    /* Allocate and define midpoint pressures */
    if(tm_idx == 0) prs_mdp_in=(double *)nco_malloc_dbg(grd_sz_in*lev_nbr_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_mdp_in value buffer");
    if(tm_idx == 0) prs_mdp_out=(double *)nco_malloc_dbg(grd_sz_out*lev_nbr_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_mdp_out value buffer");
    /* Layout is level-major: element [grd_idx + lev_idx*grd_sz] */
    if(flg_grd_in_hyb)
      for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++)
        for(lev_idx=0;lev_idx<lev_nbr_in;lev_idx++)
          prs_mdp_in[grd_idx+lev_idx*grd_sz_in]=p0_in*hyam_in[lev_idx]+ps_in[idx_fst+grd_idx]*hybm_in[lev_idx];
    if(flg_grd_out_hyb)
      for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++)
        for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++)
          prs_mdp_out[grd_idx+lev_idx*grd_sz_out]=p0_out*hyam_out[lev_idx]+ps_out[idx_fst+grd_idx]*hybm_out[lev_idx];
    if(flg_grd_in_prs)
      for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++)
        for(lev_idx=0;lev_idx<lev_nbr_in;lev_idx++)
          prs_mdp_in[grd_idx+lev_idx*grd_sz_in]=lev_in[lev_idx];
    if(flg_grd_out_prs)
      for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++)
        for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++)
          prs_mdp_out[grd_idx+lev_idx*grd_sz_out]=lev_out[lev_idx];
    if(flg_ntp_log){
      /* Transform pressures in-place so interpolation proceeds in log(p) */
      var_sz_in=grd_sz_in*lev_nbr_in;
      for(idx_in=0;idx_in<var_sz_in;idx_in++) prs_mdp_in[idx_in]=log(prs_mdp_in[idx_in]);
      var_sz_out=grd_sz_out*lev_nbr_out;
      for(idx_out=0;idx_out<var_sz_out;idx_out++) prs_mdp_out[idx_out]=log(prs_mdp_out[idx_out]);
    } /* !flg_ntp_log */
  } /* !need_prs_mdp */
  if(need_prs_ntf){
    /* Allocate and define interface pressures (same pattern as midpoint block above) */
    if(tm_idx == 0) prs_ntf_in=(double *)nco_malloc_dbg(grd_sz_in*ilev_nbr_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_ntf_in value buffer");
    if(tm_idx == 0) prs_ntf_out=(double *)nco_malloc_dbg(grd_sz_out*ilev_nbr_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_ntf_out value buffer");
    if(flg_grd_in_hyb)
      for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++)
        for(ilev_idx=0;ilev_idx<ilev_nbr_in;ilev_idx++)
          prs_ntf_in[grd_idx+ilev_idx*grd_sz_in]=p0_in*hyai_in[ilev_idx]+ps_in[idx_fst+grd_idx]*hybi_in[ilev_idx];
    if(flg_grd_out_hyb)
      for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++)
        for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++)
          prs_ntf_out[grd_idx+ilev_idx*grd_sz_out]=p0_out*hyai_out[ilev_idx]+ps_out[idx_fst+grd_idx]*hybi_out[ilev_idx];
    if(flg_grd_in_prs)
      for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++)
        for(ilev_idx=0;ilev_idx<ilev_nbr_in;ilev_idx++)
          prs_ntf_in[grd_idx+ilev_idx*grd_sz_in]=lev_in[ilev_idx];
    if(flg_grd_out_prs)
      for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++)
        for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++)
          prs_ntf_out[grd_idx+ilev_idx*grd_sz_out]=lev_out[ilev_idx];
    if(flg_ntp_log){
      var_sz_in=grd_sz_in*ilev_nbr_in;
      for(idx_in=0;idx_in<var_sz_in;idx_in++) prs_ntf_in[idx_in]=log(prs_ntf_in[idx_in]);
      var_sz_out=grd_sz_out*ilev_nbr_out;
      for(idx_out=0;idx_out<var_sz_out;idx_out++) prs_ntf_out[idx_out]=log(prs_ntf_out[idx_out]);
    } /* !flg_ntp_log */
  } /* !need_prs_ntf */
/* Set firstprivate variables to initial values */
has_ilev=False;
has_lev=False;
has_tm=False;
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"Interpolation progress: # means interpolated, ~ means copied\n");
#ifdef __GNUG__
# define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ )
# if GCC_LIB_VERSION < 490
# define GXX_OLD_OPENMP_SHARED_TREATMENT 1
# endif /* 480 */
# if GCC_LIB_VERSION >= 900
# define GXX_WITH_OPENMP5_GPU_SUPPORT 1
# endif /* 900 */
#endif /* !__GNUC__ */
#if defined( __INTEL_COMPILER)
# pragma omp parallel for default(none) firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_cmp_dbl,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,fnc_nm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth)
#else /* !__INTEL_COMPILER */
# ifdef GXX_OLD_OPENMP_SHARED_TREATMENT
# pragma omp parallel for default(none) firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_cmp_dbl,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,fnc_nm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth)
# else /* !old g++ */
# if defined(GXX_WITH_OPENMP5_GPU_SUPPORT) && 0
# pragma omp target teams distribute parallel for
# else
# pragma omp parallel for firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_cmp_dbl,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth)
# endif /* !GCC > 9.0 */
# endif /* !GCC < 4.9 */
#endif /* !__INTEL_COMPILER */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv=trv_tbl->lst[idx_tbl];
thr_idx=omp_get_thread_num();
in_id=trv_tbl->in_id_arr[thr_idx];
#ifdef _OPENMP
if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : "");
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm);
#endif /* !_OPENMP */
if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm);
if(trv.flg_rgr){
/* Interpolate variable */
var_nm=trv.nm;
if(!strcmp(var_nm,"US") || !strcmp(var_nm,"VS")) (void)fprintf(fp_stdout,"%s: WARNING %s reports attempt to vertically interpolate a variable named \"%s\". If this variable is from a CESM CAM or E3SM EAM output or initial condition file on a rectangular grid (e.g., FV 0.9x1.25), then expect this program to fail and dump core when interpolating US and to produce slightly incorrect answers for VS. The vertical interpolation routine requires that interpolated variables be on the same horizontal grid as the supplied pressure field. However, the CAM/EAM US and VS variables from rectangular grid simulations are often on a horizontal grid, called the staggered grid, that is offset from the rest of the variables including the surface pressure. US usually sits on a grid that is staggered in latitude from, and is a slightly different size than, the surface pressure grid. This leads to a core dump. VS sits on a grid staggered in longitude from, though the same size as, the surface pressure field. The resulting interpolation will be based on surface pressure half a gridcell to the east rather than centered with VS. The correct procedure to vertically interpolate US and VS is to 1) horizontally regrid the supplied surface pressure (often \"PS\") to the staggered grid, then 2) vertically interpolate US and VS to the desired vertical grid based on the surface pressure on the staggered grid, then 3) re-combine the interpolated US and VS with the interpolated versions of the rest of the variables. The best solution to this dilemma is to script this workflow. Contact Charlie if you need help with this.\n",nco_prg_nm_get(),fnc_nm,var_nm);
var_typ_rgr=NC_DOUBLE; /* NB: Perform regridding in double precision */
var_typ_out=trv.var_typ; /* NB: Output type in file is same as input type */
var_sz_in=1L;
var_sz_out=1L;
rcd=nco_inq_varid(in_id,var_nm,&var_id_in);
rcd=nco_inq_varid(out_id,var_nm,&var_id_out);
rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in);
rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out);
dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? dmn_nbr_in : dmn_nbr_out;
dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int));
dmn_id_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int));
dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */
dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in);
rcd=nco_inq_vardimid(out_id,var_id_out,dmn_id_out);
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx);
if(dmn_id_in[dmn_idx] == dmn_id_ilev_in) has_ilev=True;
if(dmn_id_in[dmn_idx] == dmn_id_lev_in) has_lev=True;
if(dmn_id_in[dmn_idx] == dmn_id_tm_in) has_tm=True;
if(flg_vrt_tm && has_tm && dmn_id_in[dmn_idx] == dmn_id_tm_in){
dmn_cnt_in[dmn_idx]=1L;
dmn_srt[dmn_idx]=tm_idx;
}else{
dmn_srt[dmn_idx]=0L;
} /* !flg_vrt_tm */
var_sz_in*=dmn_cnt_in[dmn_idx];
} /* !dmn_idx */
var_val_dbl_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() input value buffer");
rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_dbl_in,var_typ_rgr);
for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){
/* Dimension count vector is same as input except for lvl dimension */
dmn_cnt_out[dmn_idx]=dmn_cnt_in[dmn_idx];
if(has_ilev && dmn_id_out[dmn_idx] == dmn_id_ilev_out) dmn_cnt_out[dmn_idx]=ilev_nbr_out;
if(has_lev && dmn_id_out[dmn_idx] == dmn_id_lev_out) dmn_cnt_out[dmn_idx]=lev_nbr_out;
var_sz_out*=dmn_cnt_out[dmn_idx];
} /* !dmn_idx */
var_val_dbl_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output value buffer");
/* Missing value setup */
has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl);
if(has_mss_val) mss_val_cmp_dbl=mss_val_dbl; else mss_val_cmp_dbl=NC_FILL_DOUBLE;
if(has_ilev){
/* Interpolate current variable from input interface pressure grid to output interface pressure grid */
lvl_nbr_in=ilev_nbr_in;
lvl_nbr_out=ilev_nbr_out;
prs_ntp_in=prs_ntf_in;
prs_ntp_out=prs_ntf_out;
}else{
/* Interpolate current variable from input midpoint pressure grid to output midpoint pressure grid */
lvl_nbr_in=lev_nbr_in;
lvl_nbr_out=lev_nbr_out;
prs_ntp_in=prs_mdp_in;
prs_ntp_out=prs_mdp_out;
} /* !ilev */
/* Procedure: Extract input/output coordinate/data arrays into 1D column order
This enables actual interpolation code to be written for, or take advantage of, 1D interpolation routines
After interpolating into 1D sequential memory, copy back to ND output and repeat */
double *crd_in=NULL; /* Input vertical coordinate (must be monotonic) */
double *crd_out=NULL; /* Output vertical coordinate (must be monotonic) */
double *dat_in=NULL; /* Input data (to be interpolated) on input vertical coordinate grid */
double *dat_out=NULL; /* Output data (interpolated) output vertical coordinate grid (i.e., the answer) */
double *crd_in_mnt; /* Input vertical coordinate reversed if necessary to be monotonically increasing */
double *crd_out_mnt; /* Output vertical coordinate reversed if necessary to be monotonically increasing */
double *dat_in_mnt; /* Input data (to be interpolated) reversed if necessary along with input grid */
double *dat_out_mnt; /* Output data (interpolated) reversed if necessary along with output grid */
nco_xtr_sct xtr_LHS;
nco_xtr_sct xtr_RHS;
size_t brk_lft_idx;
size_t brk_rgt_idx;
size_t in_idx;
size_t in_nbr;
size_t out_nbr;
size_t out_idx;
/* Default extrapolation uses nearest valid neighbor */
xtr_LHS.xtr_fll=True;
xtr_LHS.xtr_vrb=False;
xtr_LHS.typ_fll=xtr_mth;
xtr_RHS.xtr_fll=True;
xtr_RHS.xtr_vrb=False;
xtr_RHS.typ_fll=xtr_mth;
/* Special-case extrapolation methods allowed for all except missing-value extrapolation types */
if(xtr_mth != nco_xtr_fll_msv){
if(!strcmp(var_nm,"T") || !strcmp(var_nm,"ta")) xtr_RHS.typ_fll=nco_xtr_fll_tpt;
else if(!strcmp(var_nm,"Z3") || !strcmp(var_nm,"zg")) xtr_LHS.typ_fll=xtr_RHS.typ_fll=nco_xtr_fll_gph;
} /* !xtr_mth */
crd_in=(double *)nco_malloc(lvl_nbr_in*sizeof(double));
crd_out=(double *)nco_malloc(lvl_nbr_out*sizeof(double));
dat_in=(double *)nco_malloc(lvl_nbr_in*sizeof(double));
dat_out=(double *)nco_malloc(lvl_nbr_out*sizeof(double));
in_nbr=lvl_nbr_in;
out_nbr=lvl_nbr_out;
nco_bool in_ncr; /* [flg] Input coordinate monotonically increases */
nco_bool out_ncr; /* [flg] Output coordinate monotonically increases */
/* Determine monotonicity direction only once, based on first vertical column */
if(prs_ntp_in[grd_nbr]-prs_ntp_in[0] > 0.0) in_ncr=True; else in_ncr=False;
out_ncr=True;
if(out_nbr > 1)
if(prs_ntp_out[grd_nbr]-prs_ntp_out[0] < 0.0)
out_ncr=False;
/* If necessary, allocate (once, and re-use it) additional memory to hold reversed arrays */
if(!in_ncr){
crd_in_mnt=(double *)nco_malloc(lvl_nbr_in*sizeof(double));
dat_in_mnt=(double *)nco_malloc(lvl_nbr_in*sizeof(double));
} /* !in_ncr */
if(!out_ncr){
crd_out_mnt=(double *)nco_malloc(lvl_nbr_out*sizeof(double));
dat_out_mnt=(double *)nco_malloc(lvl_nbr_out*sizeof(double));
} /* !out_ncr */
/* Constants and parameters for extrapolation */
const double gamma_moist=6.5/10000.0; /* [K/Pa] Temperature extrapolation assumes constant moist adiabatic lower atmosphere lapse rate dT/dp=constant=(6.5 K)/(100 mb) = (6.5 K)/(10000 Pa) */
const double Rd_rcp_g0=287.0/9.81; /* [K/Pa] Geopotential height extrapolation uses hypsometric equation Z2-Z1=(Rd*Tv_avg/g0)*ln(p1/p2)=(Rd*Tv_avg/g0)*(ln(p1)-ln(p2)) */
const double tpt_vrt_avg=288.0; /* [K] Mean virtual temperature assumed for geopotential height extrapolation */
nco_bool FIRST_WARNING_LHS; /* [flg] First warning for LHS extrapolation */
nco_bool FIRST_WARNING_RHS; /* [flg] First warning for RHS extrapolation */
if(tm_idx == 0){
/* Only print extrapolation warnings for first timestep to prevent noisy output
       NB: Algorithm intentionally suppresses warnings for extrapolations that appear after first timestep
       NOTE(review): FIRST_WARNING_LHS/RHS are assigned only when tm_idx == 0, yet are read unconditionally
       in the column loop below — for tm_idx > 0 they are read uninitialized. Presumably benign because
       warnings are meant to fire only in the first timestep, but confirm and consider initializing at declaration */
FIRST_WARNING_LHS=True;
FIRST_WARNING_RHS=True;
} /* !tm_idx */
/* Outer loop over columns */
for(grd_idx=0;grd_idx<grd_nbr;grd_idx++){
/* Initialize pseudo-1D variables with consecutive memory addresses to avoid indirection */
for(lvl_idx_in=0;lvl_idx_in<lvl_nbr_in;lvl_idx_in++){
idx_in=grd_idx+lvl_idx_in*grd_nbr;
crd_in[lvl_idx_in]=prs_ntp_in[idx_in];
dat_in[lvl_idx_in]=var_val_dbl_in[idx_in];
} /* !lvl_idx_in */
for(lvl_idx_out=0;lvl_idx_out<lvl_nbr_out;lvl_idx_out++){
idx_out=grd_idx+lvl_idx_out*grd_nbr;
crd_out[lvl_idx_out]=prs_ntp_out[idx_out];
} /* !lvl_idx_out */
/* Interpolation code easier to write/debug if crd_in and crd_out both monotonically increase
However, monotonically decreasing coordinates useful in many cases, such as depth coordinate,
and pressure levels arranged largest to smallest (favored by CMIP)
Next code block reverses array(s) if necessary so coordinates monotonically increase
Code uses crd_in_mnt, dat_in_mnt, crd_out_mnt where "_mnt" reminds of "monotonically increasing" assumption
Following code lifted from CSZ's libcsz.a library source code ~/sw/c++/vec.hh */
if(in_ncr){
crd_in_mnt=crd_in;
dat_in_mnt=dat_in;
}else{
for(in_idx=0;in_idx<in_nbr;in_idx++){
crd_in_mnt[in_idx]=crd_in[in_nbr-in_idx-1];
dat_in_mnt[in_idx]=dat_in[in_nbr-in_idx-1];
} /* !in_idx */
} /* !in_ncr */
if(out_ncr){
crd_out_mnt=crd_out;
dat_out_mnt=dat_out;
}else{
for(out_idx=0;out_idx<out_nbr;out_idx++)
crd_out_mnt[out_idx]=crd_out[out_nbr-out_idx-1];
} /* !out_ncr */
// Initialize bracketing index
brk_lft_idx=0;
// Loop over desired output coordinates
for(out_idx=0;out_idx<out_nbr;out_idx++){
// Order of conditions is important since second condition is illegal if brk_lft_idx >= in_nbr
while((brk_lft_idx < in_nbr) && (crd_in_mnt[brk_lft_idx] < crd_out_mnt[out_idx])){
brk_lft_idx++;
} // !while
brk_lft_idx--;
// Handle identity interpolation separately to preserve symmetry in extrapolation code
if(brk_lft_idx != in_nbr-1){
if(crd_in_mnt[brk_lft_idx+1] == crd_out_mnt[out_idx]){
dat_out_mnt[out_idx]=dat_in_mnt[brk_lft_idx+1];
if(brk_lft_idx == -1) brk_lft_idx=0; // Reset brk_lft_idx to 0 so next while loop works
continue; // Jump to next iteration
} // !crd_in_mnt
} // !brk_lft_idx
if(brk_lft_idx == -1){
// LHS Extrapolation required
// Degenerate case: crd_out_mnt[out_idx] < crd_in_mnt[0]
brk_lft_idx=0; // Reset brk_lft_idx to 0 so next while loop works
if(xtr_LHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: WARNING %s reports variable %s column %lu output value dat_out_mnt[%lu] at coordinate crd_out_mnt[%lu] = %g requires LHS extrapolation beyond leftmost valid coordinate at crd_in_mnt[%lu] = %g. Nearest valid datum is dat_in_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,var_nm,grd_idx,out_idx,out_idx,crd_out_mnt[out_idx],brk_lft_idx,crd_in_mnt[brk_lft_idx],brk_lft_idx,dat_in_mnt[brk_lft_idx]);
// Extrapolation options are presented in decreasing order of preference
if(!xtr_LHS.xtr_fll){
(void)fprintf(fp_stdout,"%s: ERROR %s Full LHS extrapolation required but not permitted\n",nco_prg_nm_get(),fnc_nm);
// return NCO_ERR;
} /* !xtr_LHS.xtr_fll */
switch(xtr_LHS.typ_fll){
case nco_xtr_fll_nil:
dat_out_mnt[out_idx]=0.0;
break;
case nco_xtr_fll_msv:
dat_out_mnt[out_idx]=mss_val_cmp_dbl;
break;
case nco_xtr_fll_ngh:
dat_out_mnt[out_idx]=dat_in_mnt[0];
break;
case nco_xtr_fll_lnr:
dat_out_mnt[out_idx]=dat_in_mnt[0]-
(crd_in_mnt[0]-crd_out_mnt[out_idx])*
(dat_in_mnt[1]-dat_in_mnt[0])/(crd_in_mnt[1]-crd_in_mnt[0]);
break;
case nco_xtr_fll_gph:
if(flg_ntp_log) /* Coordinates are already logarithmic in pressure */
dat_out_mnt[out_idx]=dat_in_mnt[0]+
Rd_rcp_g0*tpt_vrt_avg*(crd_in_mnt[0]-crd_out_mnt[out_idx]);
else /* Interpolate with logarithm of pressure coordinates */
dat_out_mnt[out_idx]=dat_in_mnt[0]+
Rd_rcp_g0*tpt_vrt_avg*log(crd_in_mnt[0]/crd_out_mnt[out_idx]);
if(FIRST_WARNING_LHS) (void)fprintf(fp_stdout,"%s: INFO %s geopotential height extrapolated upward towards space using hypsometric equation with constant global mean virtual temperature = %g for variable %s\n",nco_prg_nm_get(),fnc_nm,tpt_vrt_avg,var_nm);
FIRST_WARNING_LHS=False;
break;
default:
(void)fprintf(fp_stdout,"%s: ERROR %s Unknown xtr_LHS.typ_fll\n",nco_prg_nm_get(),fnc_nm);
// return NCO_ERR;
break;
} // !xtr_LHS.typ_fll
if(xtr_LHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: INFO %s LHS extrapolation yields dat_out_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,out_idx,dat_out_mnt[out_idx]);
}else if(brk_lft_idx < in_nbr-1){
// Normal case: crd_out_mnt is interpolable
brk_rgt_idx=brk_lft_idx+1;
// NB: brk_rgt_idx is ALWAYS greater than brk_lft_idx
// This simultaneously meets two criteria:
// 1. Divide-by-zero errors are impossible in the next step
// 2. The identity interpolation is satisfied since crd_dlt == 0.0:
// i.e., If crd_out_mnt[idx] == crd_in_mnt[brk_lft_idx] then dat_out_mnt[out_idx] := dat_in_mnt[brk_lft_idx]
// Linearly interpolate
dat_out_mnt[out_idx]=
dat_in_mnt[brk_lft_idx]+
(crd_out_mnt[out_idx]-crd_in_mnt[brk_lft_idx])*
(dat_in_mnt[brk_rgt_idx]-dat_in_mnt[brk_lft_idx])/
(crd_in_mnt[brk_rgt_idx]-crd_in_mnt[brk_lft_idx]);
}else if(brk_lft_idx == in_nbr-1){
// RHS Extrapolation required
// Degenerate case: brk_lft_idx is last element of crd_in_mnt
brk_rgt_idx=brk_lft_idx;
if(xtr_RHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: WARNING %s reports variable %s column %lu output value dat_out_mnt[%lu] at coordinate crd_out_mnt[%lu] = %g requires RHS extrapolation beyond rightmost valid coordinate at crd_in_mnt[%lu] = %g. Nearest valid datum is dat_in_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,var_nm,grd_idx,out_idx,out_idx,crd_out_mnt[out_idx],brk_rgt_idx,crd_in_mnt[brk_rgt_idx],brk_rgt_idx,dat_in_mnt[brk_rgt_idx]);
// Extrapolation options are presented in decreasing order of preference
if(!xtr_RHS.xtr_fll){
(void)fprintf(fp_stdout,"%s: ERROR %s Full RHS extrapolation required but not permitted\n",nco_prg_nm_get(),fnc_nm);
// return NCO_ERR;
} /* !xtr_RHS.xtr_fll */
switch(xtr_RHS.typ_fll){
case nco_xtr_fll_nil:
dat_out_mnt[out_idx]=0.0;
break;
case nco_xtr_fll_msv:
dat_out_mnt[out_idx]=mss_val_cmp_dbl;
break;
case nco_xtr_fll_ngh:
dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1];
break;
case nco_xtr_fll_lnr:
dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+
(crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1])*
(dat_in_mnt[in_nbr-1]-dat_in_mnt[in_nbr-2])/
(crd_in_mnt[in_nbr-1]-crd_in_mnt[in_nbr-2]);
break;
case nco_xtr_fll_tpt:
if(flg_ntp_log) /* Exponentiate so coordinates are linear in pressure */
dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+
(exp(crd_out_mnt[out_idx])-exp(crd_in_mnt[in_nbr-1]))*gamma_moist;
else /* Coordinates are already linear in pressure */
dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+
(crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1])*gamma_moist;
if(FIRST_WARNING_RHS) (void)fprintf(fp_stdout,"%s: INFO %s temperature extrapolated toward/into surface assuming constant moist adiabatic lapse rate = %g K/(100 mb) for variable %s\n",nco_prg_nm_get(),fnc_nm,gamma_moist*10000.0,var_nm);
FIRST_WARNING_RHS=False;
break;
case nco_xtr_fll_gph:
if(flg_ntp_log) /* Coordinates are already logarithmic in pressure */
dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]-
Rd_rcp_g0*tpt_vrt_avg*(crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1]);
else /* Interpolate with logarithm of pressure coordinates */
dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]-
Rd_rcp_g0*tpt_vrt_avg*log(crd_out_mnt[out_idx]/crd_in_mnt[in_nbr-1]);
if(FIRST_WARNING_RHS) (void)fprintf(fp_stdout,"%s: INFO %s geopotential height extrapolated toward/into surface using hypsometric equation with constant global mean virtual temperature = %g for variable %s\n",nco_prg_nm_get(),fnc_nm,tpt_vrt_avg,var_nm);
FIRST_WARNING_RHS=False;
break;
default:
(void)fprintf(fp_stdout,"%s: ERROR %s Unknown xtr_RHS\n",nco_prg_nm_get(),fnc_nm);
// return NCO_ERR;
break;
} // !xtr_RHS.typ_fll
if(xtr_RHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: INFO %s RHS extrapolation yields dat_out_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,out_idx,dat_out_mnt[out_idx]);
}else{
(void)fprintf(fp_stdout,"%s: ERROR %s Unforeseen value of brk_lft_idx\n",nco_prg_nm_get(),fnc_nm);
// return NCO_ERR;
} // !RHS
} // !out_idx
/* Un-reverse output data to be on original grid */
if(!out_ncr)
for(out_idx=0;out_idx<out_nbr;out_idx++)
dat_out[out_idx]=dat_out_mnt[out_nbr-out_idx-1];
// End of vec.hh code
/* Copy answers into output array */
for(lvl_idx_out=0;lvl_idx_out<lvl_nbr_out;lvl_idx_out++){
idx_out=grd_idx+lvl_idx_out*grd_nbr;
var_val_dbl_out[idx_out]=dat_out[lvl_idx_out];
} /* !lvl_idx_out */
if(nco_dbg_lvl_get() >= nco_dbg_io && grd_idx == idx_dbg){
(void)fprintf(fp_stdout,"%s: DEBUG %s variable %s at idx_dbg = %lu\n",nco_prg_nm_get(),fnc_nm,var_nm,idx_dbg);
for(out_idx=0;out_idx<out_nbr;out_idx++){
(void)fprintf(fp_stdout,"out_idx = %lu dat_out = %g\n",out_idx,dat_out[out_idx]);
} /* !out_idx */
} /* !dbg */
} /* !grd_idx */
if(crd_in) crd_in=(double *)nco_free(crd_in);
if(crd_out) crd_out=(double *)nco_free(crd_out);
if(dat_in) dat_in=(double *)nco_free(dat_in);
if(dat_out) dat_out=(double *)nco_free(dat_out);
if(!in_ncr){
if(crd_in_mnt) crd_in_mnt=(double *)nco_free(crd_in_mnt);
if(dat_in_mnt) dat_in_mnt=(double *)nco_free(dat_in_mnt);
} /* !in_ncr */
if(!out_ncr){
if(crd_out_mnt) crd_out_mnt=(double *)nco_free(crd_out_mnt);
if(dat_out_mnt) dat_out_mnt=(double *)nco_free(dat_out_mnt);
} /* !out_ncr */
if(nco_typ_ntg(var_typ_out)){
/* 20210407: Round, with rint(), integer fields before sending to netCDF for output
Otherwise implicit type conversion will truncate (rather than round) output values
This is critical for masks where rounding errors produce near integer values (e.g., 0.999...)
that could then be truncated to zero by implicit conversion instead of rounded up to 1. */
for(idx_out=0;idx_out<var_sz_out;idx_out++)
if(var_val_dbl_out[idx_out] != mss_val_cmp_dbl)
var_val_dbl_out[idx_out]=rint(var_val_dbl_out[idx_out]);
} /* !nco_typ_ntg() */
#pragma omp critical
{ /* begin OpenMP critical */
rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_dbl_out,var_typ_rgr);
} /* end OpenMP critical */
if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in);
if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out);
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out);
if(var_val_dbl_in) var_val_dbl_in=(double *)nco_free(var_val_dbl_in);
if(var_val_dbl_out) var_val_dbl_out=(double *)nco_free(var_val_dbl_out);
}else{ /* !trv.flg_rgr */
/* Use standard NCO copy routine for variables that are not regridded
20190511: Copy them only once */
if(tm_idx == 0){
#pragma omp critical
{ /* begin OpenMP critical */
(void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl);
} /* end OpenMP critical */
} /* !tm_idx */
} /* !flg_rgr */
} /* !xtr */
} /* end (OpenMP parallel for) loop over idx_tbl */
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n");
if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables interpolated = %d, copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr);
} /* !tm_idx */
if(att_nm_fll_val) att_nm_fll_val=(char *)nco_free(att_nm_fll_val);
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in);
if(dmn_ids_out) dmn_ids_out=(int *)nco_free(dmn_ids_out);
if(ilev_nm_in) ilev_nm_in=(char *)nco_free(ilev_nm_in);
if(lev_nm_in) lev_nm_in=(char *)nco_free(lev_nm_in);
if(hyai_in) hyai_in=(double *)nco_free(hyai_in);
if(hyam_in) hyam_in=(double *)nco_free(hyam_in);
if(hybi_in) hybi_in=(double *)nco_free(hybi_in);
if(hybm_in) hybm_in=(double *)nco_free(hybm_in);
if(ps_in) ps_in=(double *)nco_free(ps_in);
if(prs_mdp_in) prs_mdp_in=(double *)nco_free(prs_mdp_in);
if(prs_ntf_in) prs_ntf_in=(double *)nco_free(prs_ntf_in);
if(hyai_out) hyai_out=(double *)nco_free(hyai_out);
if(hyam_out) hyam_out=(double *)nco_free(hyam_out);
if(hybi_out) hybi_out=(double *)nco_free(hybi_out);
if(hybm_out) hybm_out=(double *)nco_free(hybm_out);
if(ilev_out) ilev_out=(double *)nco_free(ilev_out);
if(lev_in) lev_in=(double *)nco_free(lev_in);
if(lev_out) lev_out=(double *)nco_free(lev_out);
if(ps_out) ps_out=(double *)nco_free(ps_out);
if(prs_mdp_out) prs_mdp_out=(double *)nco_free(prs_mdp_out);
if(prs_ntf_out) prs_ntf_out=(double *)nco_free(prs_ntf_out);
return rcd;
} /* !nco_ntp_vrt() */
int /* O [enm] Return code */
nco_rgr_wgt /* [fnc] Regrid with external weights */
(rgr_sct * const rgr, /* I/O [sct] Regridding structure */
trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */
{
/* Purpose: Regrid fields using external weights contained in a mapfile
Examine ESMF, SCRIP, Tempest map-files:
ncks --cdl -M -m ${DATA}/scrip/rmp_T42_to_POP43_conserv.nc | m
ncks --cdl -M -m ${DATA}/maps/map_t42_to_fv129x256_aave.20150621.nc | m
ncks --cdl -M -m ${DATA}/maps/map_ne30np4_to_ne120np4_tps.20150618.nc | m
Test ESMF, SCRIP, Tempest map-files:
ncks -D 5 -O --map=${DATA}/scrip/rmp_T42_to_POP43_conserv.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc
ncks -D 5 -O --map=${DATA}/maps/map_t42_to_fv129x256_aave.20150621.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc
ncks -D 5 -O --map=${DATA}/maps/map_ne30np4_to_ne120np4_tps.20150618.nc ${DATA}/ne30/rgr/ne30_1D.nc ~/foo.nc
Mapfile formats ESMF, GRIDSPEC, SCRIP, and UGRID described here:
http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_6_3_0rp1/ESMF_refdoc/node3.html#sec:fileformat:scrip
Conventions:
grid_size: Number of gridcells (product of lat*lon)
address: Source and destination index for each link pair
num_links: Number of unique address pairs in remapping, i.e., size of sparse matrix
num_wgts: Number of weights per vertice for given remapping (we only handle num_wgts == 1 below)
= 1 Bilinear
Destination grid value determined by weights times known source grid values
at vertices of source quadrilateral that bounds destination point P
One weight per vertice guarantees fxm but is not conservative
Bilinear requires logically rectangular grid
= 1 Distance-based:
Distance-weighted uses values at num_neighbors points
The weight is inversely proportional to the angular distance from
the destination point to each neighbor on the source grid
= 3 Second-order conservative:
Described in Jones, P. W. (1999), Monthly Weather Review, 127, 2204-2210
First-order conservative schemes assume fluxes are constant within gridcell
Destination fluxes are simple summations of sources fluxes weighted by overlap areas
Old clm and bds remappers use a first-order algorithm
Second-order improves this by using a first-order Taylor expansion of flux
Source flux is centroid value plus directional offset determined by dot product
of directional gradient and vector pointing from vertice to centroid.
Three weights per vertice are centroid weight, weight times local theta-gradient from
centroid to vertice, and weight times local phi-gradient from centroid to vertice.
= 4 Bicubic:
The four weights are gradients in each direction plus a cross-gradient term
Same principle as bilinear, but more weights per vertice
Bicubic requires logically rectangular grid
wgt:
Maximum number of source cells contributing to destination cell is not a dimension
in SCRIP remapping files because SCRIP stores everything in 1-D sparse matrix arrays
Definition of sparse matrix formulations and normalization terminology, SCRIP manual p. 8, 13, 16:
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){
// Remap source function f = 1 in all unmasked source gridcells, zero elsewhere, to function F on destination grid
// Normalization: fractional area (fracarea) (F = 1 where destination overlaps unmasked source grid)
dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0];
// Normalization: destination area (destarea) (weights in each destination cell sum to its area fraction)
dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]/dst_area[ddr_dst[lnk_idx]];
// Normalization: none (F = angular area that participates in remapping)
dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]/(dst_area[ddr_dst[lnk_idx]]*dst_frc[ddr_dst[lnk_idx]]);
} // end loop over lnk
Documentation:
NCL special cases described in popRemap.ncl, e.g., at
https://github.com/yyr/ncl/blob/master/ni/src/examples/gsun/popRemap.ncl
ESMF Regridding Status:
https://www.earthsystemcog.org/projects/esmf
Sample regrid T42->POP43, SCRIP:
ncks -O --map=${DATA}/scrip/rmp_T42_to_POP43_conserv.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc */
const char fnc_nm[]="nco_rgr_wgt()"; /* [sng] Function name */
char *fl_in;
char *fl_pth_lcl=NULL;
const double rdn2dgr=180.0/M_PI;
const double dgr2rdn=M_PI/180.0;
const double eps_rlt=1.0e-14; /* [frc] Round-off error tolerance */
double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */
double area_out_ttl=0.0; /* [frc] Exact sum of area */
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int in_id; /* I [id] Input netCDF file ID */
int md_open; /* [enm] Mode flag for nc_open() call */
int out_id; /* I [id] Output netCDF file ID */
int rcd=NC_NOERR;
int dmn_idx; /* [idx] Dimension index */
int dst_grid_corners_id; /* [id] Destination grid corners dimension ID */
int dst_grid_rank_id; /* [id] Destination grid rank dimension ID */
int dst_grid_size_id; /* [id] Destination grid size dimension ID */
int num_links_id; /* [id] Number of links dimension ID */
int num_wgts_id=NC_MIN_INT; /* [id] Number of weights dimension ID */
int src_grid_corners_id; /* [id] Source grid corners dimension ID */
int src_grid_rank_id; /* [id] Source grid rank dimension ID */
int src_grid_size_id; /* [id] Source grid size dimension ID */
long int lat_idx;
long int lon_idx;
short int bnd_idx;
nco_bool FL_RTR_RMT_LCN;
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
nco_bool flg_dgn_area_out=False; /* [flg] Diagnose area_out from grid boundaries */
nco_bool flg_bnd_1D_usable=False; /* [flg] Usable 1D cell vertices exist */
nco_bool flg_stg=rgr->flg_stg; /* [flg] Write staggered grid with FV output */
nco_grd_2D_typ_enm nco_grd_2D_typ=nco_grd_2D_nil; /* [enm] Two-dimensional grid-type enum */
nco_grd_lat_typ_enm nco_grd_lat_typ=nco_grd_lat_nil; /* [enm] Latitude grid-type enum */
nco_grd_lon_typ_enm nco_grd_lon_typ=nco_grd_lon_nil; /* [enm] Longitude grid-type enum */
nco_mpf_sct mpf;
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s obtaining mapping weights from %s\n",nco_prg_nm_get(),fnc_nm,rgr->fl_map);
/* Duplicate (because nco_fl_mk_lcl() free()'s fl_in) */
fl_in=(char *)strdup(rgr->fl_map);
/* Make sure file is on local system and is readable or die trying */
fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
if(SHARE_OPEN) md_open=md_open|NC_SHARE;
rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id);
/* Identify mapping file type using string generated by weight-generator:
ESMF: title = "ESMF Offline Regridding Weight Generator"
ESMF_weight_only: title = "ESMF Regrid Weight Generator"
NCO: Title = "netCDF Operators (NCO) Offline Regridding Weight Generator"
MBTR: Title = "MOAB-TempestRemap Online Regridding Weight Generator"
SCRIP: conventions = "SCRIP"
Tempest: Title = "TempestRemap Offline Regridding Weight Generator" */
char *att_val;
char *att_cnv_val=NULL;
char *att_gnr_val=NULL;
char *att_ttl_val=NULL;
char *cnv_sng=NULL;
/* netCDF standard is uppercase Conventions, though some models use lowercase */
char att_sng_Cnv[]="Conventions"; /* [sng] Unidata standard string (uppercase) */
char att_sng_cnv[]="conventions"; /* [sng] Unidata non-standard string (lowercase) */
char att_sng_gnr[]="weight_generator"; /* [sng] CMIP6 standard string */
char att_sng_Ttl[]="Title"; /* [sng] MBTR, NCO, and Tempest use "Title" attribute. MBTR and Tempest do not use "Conventions" */
char att_sng_ttl[]="title"; /* [sng] ERWG 7.1 weight_only uses "title" not "Conventions" attribute */
char name0_sng[]="name0"; /* [sng] Attribute where Tempest stores least-rapidly-varying dimension name */
nco_rgr_mpf_typ_enm nco_rgr_mpf_typ=nco_rgr_mpf_nil; /* [enm] Type of remapping file */
nco_rgr_typ_enm nco_rgr_typ=nco_rgr_grd_nil; /* [enm] Type of grid conversion */
/* Look for map-type signature in [cC]onventions or [tT]itle attribute */
att_cnv_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_cnv);
if(!att_cnv_val) att_cnv_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_Cnv);
att_gnr_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_gnr);
att_ttl_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_ttl);
if(!att_ttl_val) att_ttl_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_Ttl);
/* Either "[cC]onventions" or "[tT]itle" attribute determines map-file type... */
if(att_cnv_val && strstr(att_cnv_val,"SCRIP")) nco_rgr_mpf_typ=nco_rgr_mpf_SCRIP;
if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_ttl_val){
if(strstr(att_ttl_val,"ESMF Offline Regridding Weight Generator")) nco_rgr_mpf_typ=nco_rgr_mpf_ESMF;
else if(strstr(att_ttl_val,"netCDF Operators")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO;
else if(strstr(att_ttl_val,"MOAB-TempestRemap")) nco_rgr_mpf_typ=nco_rgr_mpf_MBTR;
else if(strstr(att_ttl_val,"Tempest")) nco_rgr_mpf_typ=nco_rgr_mpf_Tempest;
else if(strstr(att_ttl_val,"ESMF Regrid Weight Generator")) nco_rgr_mpf_typ=nco_rgr_mpf_ESMF_weight_only;
} /* !att_ttl_val */
if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_cnv_val){
if(strstr(att_cnv_val,"NCO")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO;
} /* !att_gnr_val */
if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_gnr_val){
if(strstr(att_gnr_val,"NCO")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO;
} /* !att_gnr_val */
if(nco_rgr_mpf_typ == nco_rgr_mpf_nil){
(void)fprintf(stderr,"%s: WARNING %s unable to discern map-file type from global attributes \"[cC]onventions\" = \"%s\" and/or \"[tT]itle\" = \"%s\" and/or \"weight_generator\" = \"%s\"\n",nco_prg_nm_get(),fnc_nm,att_cnv_val ? att_cnv_val : "",att_ttl_val ? att_ttl_val : "",att_gnr_val ? att_gnr_val : "");
nco_rgr_mpf_typ=nco_rgr_mpf_unknown;
} /* !nco_rgr_mpf_typ */
if(att_cnv_val) att_cnv_val=(char *)nco_free(att_cnv_val);
if(att_gnr_val) att_gnr_val=(char *)nco_free(att_gnr_val);
if(att_ttl_val) att_ttl_val=(char *)nco_free(att_ttl_val);
switch(nco_rgr_mpf_typ){
case nco_rgr_mpf_SCRIP:
rcd+=nco_inq_dimid(in_id,"src_grid_size",&src_grid_size_id);
rcd+=nco_inq_dimid(in_id,"dst_grid_size",&dst_grid_size_id);
rcd+=nco_inq_dimid(in_id,"src_grid_corners",&src_grid_corners_id);
rcd+=nco_inq_dimid(in_id,"dst_grid_corners",&dst_grid_corners_id);
rcd+=nco_inq_dimid(in_id,"src_grid_rank",&src_grid_rank_id);
rcd+=nco_inq_dimid(in_id,"dst_grid_rank",&dst_grid_rank_id);
rcd+=nco_inq_dimid(in_id,"num_links",&num_links_id);
rcd+=nco_inq_dimid(in_id,"num_wgts",&num_wgts_id);
break;
case nco_rgr_mpf_ESMF_weight_only:
rcd+=nco_inq_dimid(in_id,"n_s",&num_links_id);
break;
case nco_rgr_mpf_ESMF:
case nco_rgr_mpf_MBTR:
case nco_rgr_mpf_NCO:
case nco_rgr_mpf_Tempest:
case nco_rgr_mpf_unknown:
rcd+=nco_inq_dimid(in_id,"n_a",&src_grid_size_id);
rcd+=nco_inq_dimid(in_id,"n_b",&dst_grid_size_id);
rcd+=nco_inq_dimid(in_id,"nv_a",&src_grid_corners_id);
rcd+=nco_inq_dimid(in_id,"nv_b",&dst_grid_corners_id);
rcd+=nco_inq_dimid(in_id,"src_grid_rank",&src_grid_rank_id);
rcd+=nco_inq_dimid(in_id,"dst_grid_rank",&dst_grid_rank_id);
if(nco_rgr_mpf_typ != nco_rgr_mpf_Tempest){
rcd+=nco_inq_dimid_flg(in_id,"num_wgts",&num_wgts_id);
if(rcd != NC_NOERR){
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s reports map-file does not contain \"num_wgts\" dimension. ERWG always produces this as an orphan dimension, so post-processing could have removed it without harming other map-file fields. No harm, no foul.\n",nco_prg_nm_get(),fnc_nm);
rcd=NC_NOERR;
} /* !rcd */
} /* !nco_rgr_mpf_Tempest */
rcd+=nco_inq_dimid(in_id,"n_s",&num_links_id);
break;
default:
(void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map-file type\n",nco_prg_nm_get(),fnc_nm);
nco_dfl_case_generic_err();
/* NB: This return never executes because nco_dfl_case_generic_err() calls exit()
Return placed here to suppress clang -Wsometimes-uninitialized warnings
This is done many other times throughout the code, though explained only once, here */
return NCO_ERR;
break;
} /* end switch */
/* Use dimension IDs to get dimension sizes */
rcd+=nco_inq_dimlen(in_id,num_links_id,&mpf.num_links);
if(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only){
rcd+=nco_inq_dimlen(in_id,src_grid_size_id,&mpf.src_grid_size);
rcd+=nco_inq_dimlen(in_id,dst_grid_size_id,&mpf.dst_grid_size);
rcd+=nco_inq_dimlen(in_id,src_grid_corners_id,&mpf.src_grid_corners);
rcd+=nco_inq_dimlen(in_id,dst_grid_corners_id,&mpf.dst_grid_corners);
rcd+=nco_inq_dimlen(in_id,src_grid_rank_id,&mpf.src_grid_rank);
rcd+=nco_inq_dimlen(in_id,dst_grid_rank_id,&mpf.dst_grid_rank);
/* TempestRemap does not generate num_wgts */
if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || num_wgts_id == NC_MIN_INT){
mpf.num_wgts=int_CEWI;
}else{
rcd+=nco_inq_dimlen(in_id,num_wgts_id,&mpf.num_wgts);
} /* !num_wgts_id */
assert(mpf.src_grid_size < INT_MAX && mpf.dst_grid_size < INT_MAX);
}else{
mpf.src_grid_size=long_CEWI;
mpf.dst_grid_size=long_CEWI;
mpf.src_grid_corners=long_CEWI;
mpf.dst_grid_corners=long_CEWI;
mpf.src_grid_rank=long_CEWI;
mpf.dst_grid_rank=long_CEWI;
mpf.num_wgts=int_CEWI;
} /* !ESMF_weight_only */
cnv_sng=strdup("normalization");
nco_rgr_nrm_typ_enm nco_rgr_nrm_typ=nco_rgr_nrm_nil;
att_val=nco_char_att_get(in_id,NC_GLOBAL,cnv_sng);
if(att_val){
if(strstr(att_val,"fracarea")) nco_rgr_nrm_typ=nco_rgr_nrm_fracarea; /* 20190912: map_gx1v6T_to_1x1_bilin.nc and map_0.1T_tripole_to_0.1x0.1_bilin.nc store "fracarea" in normalization attribute. I think NCAR created both maps for POP, probably by running ERWG with option --norm_type=fracarea. Hence "fracarea" seems to be the NCAR-way of guaranteeing that ESMF re-normalization is not performed by default. */
if(strstr(att_val,"destarea")) nco_rgr_nrm_typ=nco_rgr_nrm_destarea; /* ESMF conserve "aave" and bilinear "bilin" generate "destarea" by default */
if(strstr(att_val,"none")) nco_rgr_nrm_typ=nco_rgr_nrm_none;
if(att_val) att_val=(char *)nco_free(att_val);
}else{
/* 20150712: Tempest does not store a normalization attribute
20170620: ESMF weight_only does not store a normalization attribute
20190312: NCO does not yet store a normalization attribute */
if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_NCO || nco_rgr_mpf_typ == nco_rgr_mpf_unknown || nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) nco_rgr_nrm_typ=nco_rgr_nrm_unknown;
} /* endif normalization */
assert(nco_rgr_nrm_typ != nco_rgr_nrm_nil);
if(cnv_sng) cnv_sng=(char *)nco_free(cnv_sng);
cnv_sng=strdup("map_method");
nco_rgr_mth_typ_enm nco_rgr_mth_typ=nco_rgr_mth_nil;
att_val=nco_char_att_get(in_id,NC_GLOBAL,cnv_sng);
if(att_val){
if(strcasestr(att_val,"Conservative")) nco_rgr_mth_typ=nco_rgr_mth_conservative;
if(strcasestr(att_val,"Bilinear")) nco_rgr_mth_typ=nco_rgr_mth_bilinear;
if(strcasestr(att_val,"none")) nco_rgr_mth_typ=nco_rgr_mth_none;
if(att_val) att_val=(char *)nco_free(att_val);
}else{
/* Tempest does not store a map_method attribute */
if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR || nco_rgr_mpf_typ == nco_rgr_mpf_NCO || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_unknown) nco_rgr_mth_typ=nco_rgr_mth_unknown;
} /* endif */
if(nco_rgr_mth_typ == nco_rgr_mth_nil) (void)fprintf(stdout,"%s: WARNING %s reports map global attribute %s = %s does not match SCRIP/ESMF conventions that support only values of \"Conservative\" and \"Bilinear\" for this attribute. Proceeding anyway...\n",nco_prg_nm_get(),fnc_nm,cnv_sng,att_val);
if(cnv_sng) cnv_sng=(char *)nco_free(cnv_sng);
if(nco_dbg_lvl_get() >= nco_dbg_scl){
(void)fprintf(stderr,"%s: INFO %s regridding input metadata and grid sizes: ",nco_prg_nm_get(),fnc_nm);
(void)fprintf(stderr,"mapfile_generator = %s, map_method = %s, normalization = %s, src_grid_size = n_a = %li, dst_grid_size = n_b = %li, src_grid_corners = nv_a = %li, dst_grid_corners = nv_b = %li, src_grid_rank = %li, dst_grid_rank = %li, num_links = n_s = %li, num_wgts = %li\n",nco_rgr_mpf_sng(nco_rgr_mpf_typ),nco_rgr_mth_sng(nco_rgr_mth_typ),nco_rgr_nrm_sng(nco_rgr_nrm_typ),mpf.src_grid_size,mpf.dst_grid_size,mpf.src_grid_corners,mpf.dst_grid_corners,mpf.src_grid_rank,mpf.dst_grid_rank,mpf.num_links,mpf.num_wgts);
} /* endif dbg */
/* 20190726: Allow normalization type to be "none" for bilinear regridding which UKMO SCRIP files set to "none"*/
if(nco_rgr_mth_typ == nco_rgr_mth_conservative && nco_rgr_nrm_typ == nco_rgr_nrm_none){
(void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports requested normalization type = %s is not yet supported. Specifically, masks specified by a mask variable (dst_grid_imask,mask_b) are ignored. More specifically, any destination mask information is assumed to be built into the weight array so that no source points will contribute to masked locations. Talk to Charlie if you want this changed.\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ));
nco_exit(EXIT_FAILURE);
} /* !msk */
/* Got to here in bullet-proofing code for weight-only map-files */
if(nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) (void)fprintf(stderr,"%s: WARNING %s reached end of ESMF_weight_only section\n",nco_prg_nm_get(),fnc_nm);
assert(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only);
/* Set type of grid conversion */
if(mpf.src_grid_rank == 1 && mpf.dst_grid_rank == 1) nco_rgr_typ=nco_rgr_grd_1D_to_1D;
if(mpf.src_grid_rank == 1 && mpf.dst_grid_rank == 2) nco_rgr_typ=nco_rgr_grd_1D_to_2D;
if(mpf.src_grid_rank == 2 && mpf.dst_grid_rank == 1) nco_rgr_typ=nco_rgr_grd_2D_to_1D;
if(mpf.src_grid_rank == 2 && mpf.dst_grid_rank == 2) nco_rgr_typ=nco_rgr_grd_2D_to_2D;
assert(nco_rgr_typ != nco_rgr_grd_nil);
/* Save typing later */
/* Decompose the conversion type into per-side flags: "1D" grids use a single
   col dimension, "2D" grids use lat x lon (rectangular or curvilinear) */
nco_bool flg_grd_in_1D_dat_in_2D=False;
nco_bool flg_grd_in_1D=False;
nco_bool flg_grd_in_2D=False;
nco_bool flg_grd_out_1D=False;
nco_bool flg_grd_out_2D=False;
if(nco_rgr_typ == nco_rgr_grd_1D_to_1D || nco_rgr_typ == nco_rgr_grd_1D_to_2D) flg_grd_in_1D=True;
if(nco_rgr_typ == nco_rgr_grd_2D_to_1D || nco_rgr_typ == nco_rgr_grd_2D_to_2D) flg_grd_in_2D=True;
if(nco_rgr_typ == nco_rgr_grd_1D_to_1D || nco_rgr_typ == nco_rgr_grd_2D_to_1D) flg_grd_out_1D=True;
if(nco_rgr_typ == nco_rgr_grd_1D_to_2D || nco_rgr_typ == nco_rgr_grd_2D_to_2D) flg_grd_out_2D=True;
int dmn_nbr_hrz_crd; /* [nbr] Number of horizontal dimensions in output grid */
if(flg_grd_out_2D) dmn_nbr_hrz_crd=2; else dmn_nbr_hrz_crd=1;
/* Obtain grid values necessary to compute output latitude and longitude coordinates */
/* netCDF variable IDs for required map-file fields. Field NAMES differ by map-file
   convention (SCRIP vs. ESMF/NCO/Tempest); the switch below resolves each ID.
   NB: rcd accumulates (rcd+=) so any lookup failure is detectable afterwards. */
int area_dst_id; /* [id] Area variable ID */
int col_src_adr_id; /* [id] Source address (col) variable ID */
int dmn_sz_in_int_id; /* [id] Source grid dimension sizes ID */
int dmn_sz_out_int_id; /* [id] Destination grid dimension sizes ID */
int dst_grd_crn_lat_id; /* [id] Destination grid corner latitudes variable ID */
int dst_grd_crn_lon_id; /* [id] Destination grid corner longitudes variable ID */
int dst_grd_ctr_lat_id; /* [id] Destination grid center latitudes variable ID */
int dst_grd_ctr_lon_id; /* [id] Destination grid center longitudes variable ID */
int frc_dst_id; /* [id] Fraction variable ID */
int msk_dst_id=NC_MIN_INT; /* [id] Mask variable ID */
int row_dst_adr_id; /* [id] Destination address (row) variable ID */
int wgt_raw_id; /* [id] Remap matrix variable ID */
switch(nco_rgr_mpf_typ){
/* Obtain fields whose name depends on mapfile type */
case nco_rgr_mpf_SCRIP:
rcd+=nco_inq_varid(in_id,"dst_grid_area",&area_dst_id); /* ESMF: area_b */
rcd+=nco_inq_varid(in_id,"dst_grid_center_lon",&dst_grd_ctr_lon_id); /* ESMF: xc_b */
rcd+=nco_inq_varid(in_id,"dst_grid_center_lat",&dst_grd_ctr_lat_id); /* ESMF: yc_b */
rcd+=nco_inq_varid(in_id,"dst_grid_corner_lon",&dst_grd_crn_lon_id); /* ESMF: xv_b */
rcd+=nco_inq_varid(in_id,"dst_grid_corner_lat",&dst_grd_crn_lat_id); /* ESMF: yv_b */
rcd+=nco_inq_varid(in_id,"dst_grid_frac",&frc_dst_id); /* ESMF: frac_b */
rcd+=nco_inq_varid(in_id,"dst_address",&row_dst_adr_id); /* ESMF: row */
rcd+=nco_inq_varid(in_id,"src_address",&col_src_adr_id); /* ESMF: col */
rcd+=nco_inq_varid(in_id,"remap_matrix",&wgt_raw_id); /* NB: remap_matrix[num_links,num_wgts] != S[n_s] */
break;
case nco_rgr_mpf_ESMF:
case nco_rgr_mpf_ESMF_weight_only:
case nco_rgr_mpf_MBTR:
case nco_rgr_mpf_NCO:
case nco_rgr_mpf_Tempest:
case nco_rgr_mpf_unknown:
/* ESMF-style names; weight-only files carry only the sparse-matrix triplet (row,col,S) */
if(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only){
rcd+=nco_inq_varid(in_id,"area_b",&area_dst_id); /* SCRIP: dst_grid_area */
rcd+=nco_inq_varid(in_id,"xc_b",&dst_grd_ctr_lon_id); /* SCRIP: dst_grid_center_lon */
rcd+=nco_inq_varid(in_id,"yc_b",&dst_grd_ctr_lat_id); /* SCRIP: dst_grid_center_lat */
rcd+=nco_inq_varid(in_id,"xv_b",&dst_grd_crn_lon_id); /* SCRIP: dst_grid_corner_lon */
rcd+=nco_inq_varid(in_id,"yv_b",&dst_grd_crn_lat_id); /* SCRIP: dst_grid_corner_lat */
rcd+=nco_inq_varid(in_id,"frac_b",&frc_dst_id); /* SCRIP: dst_grid_frac */
} /* !nco_rgr_mpf_ESMF_weight_only */
rcd+=nco_inq_varid(in_id,"row",&row_dst_adr_id); /* SCRIP: dst_address */
rcd+=nco_inq_varid(in_id,"col",&col_src_adr_id); /* SCRIP: src_address */
rcd+=nco_inq_varid(in_id,"S",&wgt_raw_id); /* NB: remap_matrix[num_links,num_wgts] != S[n_s] */
break;
default:
(void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map file type\n",nco_prg_nm_get(),fnc_nm);
nco_dfl_case_generic_err();
/* NB: This return never executes because nco_dfl_case_generic_err() calls exit()
Return placed here to suppress clang -Wsometimes-uninitialized warnings
This is done many other times throughout the code, though explained only once, here */
return NCO_ERR;
break;
} /* end switch */
/* Obtain fields whose presence depends on mapfile type */
/* The destination mask is optional: look it up with the _flg variant (which
   tolerates absence) and degrade gracefully unless the user demanded masking */
nco_bool flg_msk_out=rgr->flg_msk_out; /* [flg] Add mask to output */
nco_bool flg_msk_apl=rgr->flg_msk_apl; /* [flg] Apply msk_out to variables after regridding */
msk_dst_id=NC_MIN_INT; /* Sentinel: remains NC_MIN_INT when no mask variable is found */
if(flg_msk_out || flg_msk_apl){
switch(nco_rgr_mpf_typ){
case nco_rgr_mpf_SCRIP:
rcd=nco_inq_varid_flg(in_id,"dst_grid_imask",&msk_dst_id); /* ESMF: mask_b */
break;
case nco_rgr_mpf_ESMF:
case nco_rgr_mpf_MBTR:
case nco_rgr_mpf_NCO:
case nco_rgr_mpf_Tempest:
case nco_rgr_mpf_unknown:
/* 20190315: TempestRemap did not propagate mask_a/b until ~201902
20210519: MBTR did not propagate mask_a/b as of ~202105 */
rcd=nco_inq_varid_flg(in_id,"mask_b",&msk_dst_id); /* SCRIP: dst_grid_imask */
break;
default:
(void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map-file type\n",nco_prg_nm_get(),fnc_nm);
nco_dfl_case_generic_err();
} /* !nco_rgr_mpf_typ */
if(rcd == NC_ENOTVAR){
/* Mask variable absent: fatal only when user explicitly requested mask application */
if(flg_msk_apl){
(void)fprintf(stderr,"%s: ERROR %s reports that user requested (with --mask_apply) the regridder to apply the destination mask field to variables after regridding. Unfortunately, the map-file lacks a destination mask of the expected name (usually \"mask_b\").\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* flg_msk_apl */
(void)fprintf(stderr,"%s: INFO %s reports map-file lacks mask_b. %sContinuing anyway without masks...\n",nco_prg_nm_get(),fnc_nm,(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_MBTR) ? "Probably this is either a TempestRemap map-file created before ~201902 when TR began to propagate mask_a/b variables, or it is a MOAB-TempestRemap file which has never (as of 202105) propagated mask_a/b variables" : "");
rcd=NC_NOERR; /* Reset so the soft failure does not poison later rcd checks */
} /* !rcd */
if(msk_dst_id == NC_MIN_INT) flg_msk_out=False;
} /* !flg_msk_out */
/* Obtain fields whose names are independent of mapfile type */
rcd+=nco_inq_varid(in_id,"src_grid_dims",&dmn_sz_in_int_id);
rcd+=nco_inq_varid(in_id,"dst_grid_dims",&dmn_sz_out_int_id);
int lon_psn_src; /* [idx] Ordinal position of longitude in rectangular source grid dimension-size array */
int lat_psn_src; /* [idx] Ordinal position of latitude in rectangular source grid dimension-size array */
int lon_psn_dst=int_CEWI; /* [idx] Ordinal position of longitude in rectangular destination grid dimension-size array */
int lat_psn_dst=int_CEWI; /* [idx] Ordinal position of latitude in rectangular destination grid dimension-size array */
if(flg_grd_in_2D){
lon_psn_src=0; /* SCRIP introduced [lon,lat] convention because more natural for Fortran */
lat_psn_src=1;
if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest){
/* Until 20150814, Tempest stored [src/dst]_grid_dims as [lat,lon] unlike SCRIP's [lon,lat] order
Newer behavior follows SCRIP [lon,lat] order
Challenge: Support both older and newer Tempest mapfiles
Tempest (unlike SCRIP and ESMF) annotates mapfile [src/dst]_grid_dims with attributes that identify axis to which each element of [src/dst]_grid_dims refers
Solution: Use Tempest mapfile [src/dst]_grid_dims attributes "name0" and/or "name1" to determine if axes' positions follow old order */
att_val=nco_char_att_get(in_id,dmn_sz_in_int_id,name0_sng);
if(att_val){
/* "name0" containing "lat" means first element is latitude, i.e., old [lat,lon] order */
if(strstr(att_val,"lat")){
lon_psn_src=1;
lat_psn_src=0;
} /* !lat */
if(att_val) att_val=(char *)nco_free(att_val);
} /* end rcd && att_typ */
} /* !Tempest */
} /* !flg_grd_in_2D */
/* Determine destination-grid axis order: default to SCRIP [lon,lat] convention,
   then consult Tempest's "name0" annotation for pre-20150814 [lat,lon] files.
   Bug fix: the annotation must be read from the DESTINATION dimension-size
   variable (dmn_sz_out_int_id); the previous code queried the source variable
   (dmn_sz_in_int_id), so old Tempest files with differing axis orders would
   mis-assign lat_psn_dst/lon_psn_dst. Mirrors the source-side logic above,
   which correctly queries dmn_sz_in_int_id. */
if(flg_grd_out_2D){
lon_psn_dst=0;
lat_psn_dst=1;
if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest){
att_val=nco_char_att_get(in_id,dmn_sz_out_int_id,name0_sng);
if(att_val){
/* "name0" containing "lat" means first element is latitude, i.e., old [lat,lon] order */
if(strstr(att_val,"lat")){
lon_psn_dst=1;
lat_psn_dst=0;
} /* !lat */
if(att_val) att_val=(char *)nco_free(att_val);
} /* end rcd && att_typ */
} /* !Tempest */
} /* !flg_grd_out_2D */
/* Destination-grid working storage and hyperslab bookkeeping.
   dmn_srt/dmn_cnt/dmn_srd are reused for every nco_get_var[as]() call below,
   so their contents are only valid between set-up and the immediately
   following read. */
const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */
const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */
const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */
const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */
double *area_out; /* [sr] Area of destination grid */
double *frc_out=NULL; /* [frc] Fraction of destination grid */
double *lat_bnd_out=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular destination grid */
double *lat_crn_out=NULL; /* [dgr] Latitude corners of rectangular destination grid */
double *lat_ctr_out=NULL_CEWI; /* [dgr] Latitude centers of rectangular destination grid */
double *lat_ntf_out=NULL; /* [dgr] Latitude interfaces of rectangular destination grid */
double *lat_wgt_out=NULL; /* [dgr] Latitude weights of rectangular destination grid */
double *lon_bnd_out=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular destination grid */
double *lon_crn_out=NULL; /* [dgr] Longitude corners of rectangular destination grid */
double *lon_ctr_out=NULL_CEWI; /* [dgr] Longitude centers of rectangular destination grid */
double *lon_ntf_out=NULL; /* [dgr] Longitude interfaces of rectangular destination grid */
double *slat_ctr_out=NULL_CEWI; /* [dgr] Latitude centers of staggered FV destination grid */
double *slat_wgt_out=NULL_CEWI; /* [frc] Latitude weights of staggered FV destination grid */
double *slon_ctr_out=NULL_CEWI; /* [dgr] Longitude centers of staggered FV destination grid */
double *wgt_raw; /* [frc] Remapping weights */
int *col_src_adr; /* [idx] Source address (col) */
int *row_dst_adr; /* [idx] Destination address (row) */
int *msk_out=NULL; /* [flg] Mask on destination grid */
int *dmn_sz_in_int; /* [nbr] Array of dimension sizes of source grid */
int *dmn_sz_out_int; /* [nbr] Array of dimension sizes of destination grid */
long *dmn_cnt_in=NULL;
long *dmn_cnt_out=NULL;
long *dmn_cnt=NULL;
long *dmn_srt=NULL;
long *dmn_srd=NULL;
long idx; /* [idx] Counting index for unrolled grids */
/* Allocate space to hold dimension metadata for destination grid */
dmn_srt=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long));
dmn_cnt=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long));
dmn_srd=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long));
/* Read src_grid_dims and dst_grid_dims integer arrays (lengths = grid ranks) */
dmn_srt[0]=0L;
dmn_cnt[0]=mpf.src_grid_rank;
dmn_sz_in_int=(int *)nco_malloc(mpf.src_grid_rank*nco_typ_lng((nc_type)NC_INT));
rcd=nco_get_vara(in_id,dmn_sz_in_int_id,dmn_srt,dmn_cnt,dmn_sz_in_int,(nc_type)NC_INT);
dmn_srt[0]=0L;
dmn_cnt[0]=mpf.dst_grid_rank;
dmn_sz_out_int=(int *)nco_malloc(mpf.dst_grid_rank*nco_typ_lng((nc_type)NC_INT));
rcd=nco_get_vara(in_id,dmn_sz_out_int_id,dmn_srt,dmn_cnt,dmn_sz_out_int,(nc_type)NC_INT);
/* Check-for and workaround faulty Tempest and MPAS-O/I grid sizes */
/* When the declared 1-D dimension size disagrees with the authoritative
   grid size from the map-file header, trust the header and overwrite */
if(flg_grd_in_1D && (mpf.src_grid_size != dmn_sz_in_int[0])){
(void)fprintf(stdout,"%s: INFO %s reports input grid dimension sizes disagree: mpf.src_grid_size = %ld != %d = dmn_sz_in[0]. Problem may be caused by incorrect src_grid_dims variable. This is a known issue with some TempestRemap mapfiles generated prior to ~20150901, and in some ESMF mapfiles for MPAS-O/I. This problem can be safely ignored if workaround succeeds. Attempting workaround ...\n",nco_prg_nm_get(),fnc_nm,mpf.src_grid_size,dmn_sz_in_int[0]);
dmn_sz_in_int[0]=mpf.src_grid_size;
} /* !bug */
if(flg_grd_out_1D && (mpf.dst_grid_size != dmn_sz_out_int[0])){
(void)fprintf(stdout,"%s: INFO %s reports output grid dimension sizes disagree: mpf.dst_grid_size = %ld != %d = dmn_sz_out[0]. Problem may be caused by incorrect dst_grid_dims variable. This is a known issue with some TempestRemap mapfiles generated prior to ~20150901, and in some ESMF mapfiles for MPAS-O/I. This problem can be safely ignored if workaround succeeds. Attempting workaround ...\n",nco_prg_nm_get(),fnc_nm,mpf.dst_grid_size,dmn_sz_out_int[0]);
dmn_sz_out_int[0]=mpf.dst_grid_size;
} /* !bug */
long col_nbr_in; /* [idx] Number of columns in source grid */
long lon_nbr_in; /* [idx] Number of longitudes in rectangular source grid */
long lat_nbr_in; /* [idx] Number of latitudes in rectangular source grid */
const size_t grd_sz_in=mpf.src_grid_size; /* [nbr] Number of elements in single layer of input grid */
const size_t grd_sz_out=mpf.dst_grid_size; /* [nbr] Number of elements in single layer of output grid */
if(flg_grd_in_1D){
/* Unstructured input: col/lon/lat counts all alias the single dimension size */
col_nbr_in=dmn_sz_in_int[0];
lon_nbr_in=dmn_sz_in_int[0];
lat_nbr_in=dmn_sz_in_int[0];
}else if(flg_grd_in_2D){
col_nbr_in=0;
lon_nbr_in=dmn_sz_in_int[lon_psn_src];
lat_nbr_in=dmn_sz_in_int[lat_psn_src];
/* Sanity-check */
assert(lat_nbr_in*lon_nbr_in == (long)grd_sz_in);
} /* !src_grid_rank */
/* Destination-grid element counts.
   Bug fix: in the 2-D branch, lat_nbr_out and lon_nbr_out must be assigned from
   dmn_sz_out_int BEFORE computing col_nbr_out=lat_nbr_out*lon_nbr_out; the
   previous ordering multiplied the long_CEWI initialization sentinels, leaving
   col_nbr_out garbage (visible in later diagnostics that print col_out). */
const int bnd_tm_nbr_out=2; /* [nbr] Number of boundaries for output time */
int bnd_nbr_out=int_CEWI; /* [nbr] Number of boundaries for output time and rectangular grid coordinates, and number of vertices for output non-rectangular grid coordinates */
long col_nbr_out=long_CEWI; /* [nbr] Number of columns in destination grid */
long lon_nbr_out=long_CEWI; /* [nbr] Number of longitudes in rectangular destination grid */
long lat_nbr_out=long_CEWI; /* [nbr] Number of latitudes in rectangular destination grid */
long slat_nbr_out=long_CEWI; /* [nbr] Number of latitudes in staggered FV grid destination grid */
long slon_nbr_out=long_CEWI; /* [nbr] Number of longitudes in staggered FV grid destination grid */
if(flg_grd_out_1D){
bnd_nbr_out=mpf.dst_grid_corners;
col_nbr_out=dmn_sz_out_int[0];
lat_nbr_out=dmn_sz_out_int[0];
lon_nbr_out=dmn_sz_out_int[0];
/* Sanity-check */
assert(col_nbr_out == (long)grd_sz_out);
}else if(flg_grd_out_2D){
lat_nbr_out=dmn_sz_out_int[lat_psn_dst];
lon_nbr_out=dmn_sz_out_int[lon_psn_dst];
col_nbr_out=lat_nbr_out*lon_nbr_out;
slat_nbr_out=lat_nbr_out-1L; /* Staggered FV latitudes sit between the lat_nbr_out centers */
slon_nbr_out=lon_nbr_out;
/* Sanity-check */
assert(lat_nbr_out*lon_nbr_out == (long)grd_sz_out);
} /* !dst_grid_rank */
/* Ensure coordinates are in degrees not radians for simplicity and CF-compliance
NB: ${DATA}/scrip/rmp_T42_to_POP43_conserv.nc has [xy]?_a in degrees and [xy]?_b in radians! */
nco_bool flg_crd_rdn=False; /* [flg] Destination coordinates are in radians not degrees */
char unt_sng[]="units"; /* [sng] netCDF-standard units attribute name */
/* Inspect "units" attribute of destination center latitude to decide degrees vs. radians */
att_val=nco_char_att_get(in_id,dst_grd_ctr_lat_id,unt_sng);
if(att_val){
/* Match "radian" and "radians" */
if(strstr(att_val,"radian")) flg_crd_rdn=True;
if(att_val) att_val=(char *)nco_free(att_val);
} /* end rcd && att_typ */
nco_bool flg_grd_out_crv=False; /* [flg] Curvilinear coordinates */
nco_bool flg_grd_out_rct=False; /* [flg] Rectangular coordinates */
const nc_type crd_typ_out=NC_DOUBLE;
if(flg_grd_out_2D){
lon_ctr_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out));
lat_ctr_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out));
lon_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*grd_sz_out*nco_typ_lng(crd_typ_out));
lat_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*grd_sz_out*nco_typ_lng(crd_typ_out));
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_out;
rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out);
rcd=nco_get_vara(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,lat_ctr_out,crd_typ_out);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=grd_sz_out;
dmn_cnt[1]=mpf.dst_grid_corners;
rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_crn_out,crd_typ_out);
rcd=nco_get_vara(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,lat_crn_out,crd_typ_out);
/* User may specify curvilinear grid (with --rgr crv). Otherwise, manually test for curvilinear source grid. */
flg_grd_out_crv=rgr->flg_crv; /* [flg] Curvilinear coordinates */
if(flg_grd_out_crv){
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Output grid specified to be %s\n",nco_prg_nm_get(),flg_grd_out_crv ? "Curvilinear" : "Rectangular");
}else{
long idx_tst=long_CEWI; /* [idx] Index of first latitude or longitude */
for(idx=0;idx<(long)grd_sz_out;idx++){
if(idx%lon_nbr_out == 0) idx_tst=idx;
if(lat_ctr_out[idx] != lat_ctr_out[idx_tst]) break;
// (void)fprintf(stdout,"%s: DEBUG lat_ctr_out[%li] = %g, lat_ctr_out[%li] = %g\n",nco_prg_nm_get(),idx,lat_ctr_out[idx],idx_tst,lat_ctr_out[idx_tst]);
/* fxm: also test lon */
} /* !rectangular */
if(idx != (long)grd_sz_out) flg_grd_out_crv=True; else flg_grd_out_rct=True;
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO Output grid detected to be %s\n",nco_prg_nm_get(),flg_grd_out_crv ? "Curvilinear" : "Rectangular");
} /* !flg_grd_out_crv */
if(flg_grd_out_crv) bnd_nbr_out=mpf.dst_grid_corners;
if(flg_grd_out_rct) bnd_nbr_out=2; /* NB: Assumes rectangular latitude and longitude and is invalid for other quadrilaterals */
} /* !flg_grd_out_2D */
if(nco_dbg_lvl_get() >= nco_dbg_scl){
(void)fprintf(stderr,"%s: INFO %s grid conversion type = %s with expected input and prescribed output grid sizes: ",nco_prg_nm_get(),fnc_nm,nco_rgr_grd_sng(nco_rgr_typ));
(void)fprintf(stderr,"lat_in = %li, lon_in = %li, col_in = %li, lat_out = %li, lon_out = %li, col_out = %li\n",lat_nbr_in,lon_nbr_in,col_nbr_in,lat_nbr_out,lon_nbr_out,col_nbr_out);
} /* endif dbg */
/* Allocate space for and obtain coordinates */
if(flg_grd_out_1D){
lon_ctr_out=(double *)nco_malloc(col_nbr_out*nco_typ_lng(crd_typ_out));
lat_ctr_out=(double *)nco_malloc(col_nbr_out*nco_typ_lng(crd_typ_out));
lon_bnd_out=(double *)nco_malloc(col_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out));
lat_bnd_out=(double *)nco_malloc(col_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out));
} /* !flg_grd_out_1D */
if(flg_grd_out_rct){
if(lat_ctr_out) lat_ctr_out=(double *)nco_free(lat_ctr_out);
if(lon_ctr_out) lon_ctr_out=(double *)nco_free(lon_ctr_out);
if(lat_crn_out) lat_crn_out=(double *)nco_free(lat_crn_out);
if(lon_crn_out) lon_crn_out=(double *)nco_free(lon_crn_out);
lon_ctr_out=(double *)nco_malloc(lon_nbr_out*nco_typ_lng(crd_typ_out));
lat_ctr_out=(double *)nco_malloc(lat_nbr_out*nco_typ_lng(crd_typ_out));
lon_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*lon_nbr_out*nco_typ_lng(crd_typ_out));
lat_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*lat_nbr_out*nco_typ_lng(crd_typ_out));
lat_wgt_out=(double *)nco_malloc(lat_nbr_out*nco_typ_lng(crd_typ_out));
lon_ntf_out=(double *)nco_malloc((lon_nbr_out+1L)*nco_typ_lng(crd_typ_out));
lat_ntf_out=(double *)nco_malloc((lat_nbr_out+1L)*nco_typ_lng(crd_typ_out));
lon_bnd_out=(double *)nco_malloc(lon_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out));
lat_bnd_out=(double *)nco_malloc(lat_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out));
} /* !flg_grd_out_rct */
/* Arrays unroll into all longitudes for first latitude, then second latitude, ...
Obtain longitudes by reading first block contiguously (unstrided)
Obtain latitudes by reading unrolled data with stride of lon_nbr */
if(flg_grd_out_1D){
dmn_srt[0]=0L;
dmn_cnt[0]=col_nbr_out;
rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out);
dmn_srt[0]=0L;
dmn_cnt[0]=col_nbr_out;
rcd=nco_get_vara(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,lat_ctr_out,crd_typ_out);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=col_nbr_out;
dmn_cnt[1]=bnd_nbr_out;
rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_bnd_out,crd_typ_out);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=col_nbr_out;
dmn_cnt[1]=bnd_nbr_out;
rcd=nco_get_vara(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,lat_bnd_out,crd_typ_out);
if(flg_crd_rdn){
for(idx=0;idx<col_nbr_out;idx++){
lon_ctr_out[idx]*=rdn2dgr;
lat_ctr_out[idx]*=rdn2dgr;
} /* !idx */
for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++){
lon_bnd_out[idx]*=rdn2dgr;
lat_bnd_out[idx]*=rdn2dgr;
} /* !idx */
} /* !rdn */
/* Is 1D interface information usable? Yes, unless if all interfaces are zeros
NB: fxm Better algorithm for "usable" is that not all interfaces in any cell are equal */
flg_bnd_1D_usable=True;
for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++)
if(lon_bnd_out[idx] != 0.0) break;
if(idx == col_nbr_out*bnd_nbr_out){
flg_bnd_1D_usable=False;
}else{
for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++)
if(lat_bnd_out[idx] != 0.0) break;
if(idx == col_nbr_out*bnd_nbr_out) flg_bnd_1D_usable=False;
} /* !usable */
if(nco_dbg_lvl_get() >= nco_dbg_crr){
for(idx=0;idx<lat_nbr_out;idx++){
(void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr_out[idx]);
for(bnd_idx=0;bnd_idx<bnd_nbr_out;bnd_idx++)
(void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd_out[bnd_nbr_out*idx+bnd_idx],bnd_idx == bnd_nbr_out-1 ? "]\n" : ", ");
} /* end loop over lat */
for(idx=0;idx<lon_nbr_out;idx++){
(void)fprintf(stdout,"lon[%li] = %g, vertices = ",idx,lon_ctr_out[idx]);
for(bnd_idx=0;bnd_idx<bnd_nbr_out;bnd_idx++)
(void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lon_bnd_out[bnd_nbr_out*idx+bnd_idx],bnd_idx == bnd_nbr_out-1 ? "]\n" : ", ");
} /* end loop over lon */
} /* endif dbg */
} /* !flg_grd_out_1D */
if(flg_grd_out_rct){
/* fxm: sub-sample these from the already-read ctr/crn arrays */
dmn_srt[0L]=0L;
dmn_cnt[0L]=lon_nbr_out;
rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out);
dmn_srt[0L]=0L;
dmn_cnt[0L]=lat_nbr_out;
dmn_srd[0L]=lon_nbr_out;
rcd=nco_get_vars(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,dmn_srd,lat_ctr_out,crd_typ_out);
dmn_srt[0L]=dmn_srt[1]=0L;
dmn_cnt[0L]=lon_nbr_out;
dmn_cnt[1]=mpf.dst_grid_corners;
rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_crn_out,crd_typ_out);
dmn_srt[0L]=0L;
dmn_cnt[0L]=lat_nbr_out;
dmn_srd[0L]=lon_nbr_out;
dmn_srt[1]=0L;
dmn_cnt[1]=mpf.dst_grid_corners;
dmn_srd[1]=1L;
rcd=nco_get_vars(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,dmn_srd,lat_crn_out,crd_typ_out);
if(flg_crd_rdn){
for(idx=0L;idx<lon_nbr_out;idx++) lon_ctr_out[idx]*=rdn2dgr;
for(idx=0L;idx<lat_nbr_out;idx++) lat_ctr_out[idx]*=rdn2dgr;
for(idx=0L;idx<lon_nbr_out*mpf.dst_grid_corners;idx++) lon_crn_out[idx]*=rdn2dgr;
for(idx=0L;idx<lat_nbr_out*mpf.dst_grid_corners;idx++) lat_crn_out[idx]*=rdn2dgr;
} /* !rdn */
} /* !flg_grd_out_rct */
if(flg_grd_out_crv){
if(flg_crd_rdn){
for(idx=0L;idx<(long)grd_sz_out;idx++) lon_ctr_out[idx]*=rdn2dgr;
for(idx=0L;idx<(long)grd_sz_out;idx++) lat_ctr_out[idx]*=rdn2dgr;
for(idx=0L;idx<(long)grd_sz_out*mpf.dst_grid_corners;idx++) lon_crn_out[idx]*=rdn2dgr;
for(idx=0L;idx<(long)grd_sz_out*mpf.dst_grid_corners;idx++) lat_crn_out[idx]*=rdn2dgr;
} /* !rdn */
} /* !flg_grd_out_crv */
/* Allocate space for and obtain area, fraction, and mask, which are needed for both 1D and 2D grids */
area_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out));
dmn_srt[0L]=0L;
dmn_cnt[0L]=grd_sz_out;
rcd=nco_get_vara(in_id,area_dst_id,dmn_srt,dmn_cnt,area_out,crd_typ_out);
frc_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out));
dmn_srt[0L]=0L;
dmn_cnt[0L]=grd_sz_out;
rcd=nco_get_vara(in_id,frc_dst_id,dmn_srt,dmn_cnt,frc_out,crd_typ_out);
/* Mask is optional: only read when a mask variable was located earlier
   (msk_dst_id keeps its NC_MIN_INT sentinel when absent) */
if(msk_dst_id != NC_MIN_INT){
msk_out=(int *)nco_malloc(grd_sz_out*nco_typ_lng(NC_INT));
dmn_srt[0L]=0L;
dmn_cnt[0L]=grd_sz_out;
rcd=nco_get_vara(in_id,msk_dst_id,dmn_srt,dmn_cnt,msk_out,(nc_type)NC_INT);
} /* !msk */
/* Derive 2D interface boundaries from lat and lon grid-center values
NB: Procedures to derive interfaces from midpoints on rectangular grids are theoretically possible
However, ESMF often outputs interfaces values (e.g., yv_b) for midpoint coordinates (e.g., yc_b)
For example, ACME standard map from ne120np4 to 181x360 has yc_b[0] = yv_b[0] = -90.0
Latitude = -90 is, by definition, not a midpoint coordinate
This appears to be an artifact of the non-physical representation of the FV grid, i.e.,
a grid center located at the pole where longitudes collapse in the model, but cannot be
represented as collapsed on a rectangular 2D grid with non-zero areas.
Unfortunately, ESMF supports this nonsense by labeling the grid center as at the pole
so that applications can easily diagnose an FV grid when they read-in datasets.
A superior application could diagnose FV just fine from actual non-polar gridcell centers
Maybe ESMF could introduce a flag or something to indicate/avoid this special case?
Safer to read boundary interfaces directly from grid corner/vertice arrays in map file
Derivation of boundaries xv_b, yv_b from _correct_ xc_b, yc_b is follows
Do not implement this procedure until resolving midpoint/center issue described above:
lon_ntf_out[0L]=0.5*(lon_ctr_out[0L]+lon_ctr_out[lon_nbr_out-1L])-180.0; // Extrapolation
lat_ntf_out[0L]=lat_ctr_out[0L]-0.5*(lat_ctr_out[1L]-lat_ctr_out[0L]); // Extrapolation
for(idx=1L;idx<lon_nbr_out;idx++) lon_ntf_out[idx]=0.5*(lon_ctr_out[idx-1L]+lon_ctr_out[idx]);
for(idx=1L;idx<lat_nbr_out;idx++) lat_ntf_out[idx]=0.5*(lat_ctr_out[idx-1L]+lat_ctr_out[idx]);
lon_ntf_out[lon_nbr_out]=lon_ntf_out[0L]+360.0;
lat_ntf_out[lat_nbr_out]=lat_ctr_out[lat_nbr_out-1L]+0.5*(lat_ctr_out[lat_nbr_out-1L]-lat_ctr_out[lat_nbr_out-2L]); */
if(flg_grd_out_rct){
double lon_spn; /* [dgr] Longitude span */
double lat_spn; /* [dgr] Latitude span */
nco_bool flg_s2n=True; /* I [enm] Latitude grid-direction is South-to-North */
if(lat_ctr_out[1L] < lat_ctr_out[0L]) flg_s2n=False;
/* Obtain 1-D rectangular interfaces from unrolled 1-D vertice arrays */
for(idx=0L;idx<lon_nbr_out;idx++) lon_ntf_out[idx]=lon_crn_out[mpf.dst_grid_corners*idx];
/* 20201009
The four possible CCW RLL orderings start with the ul, ll, lr, or ur vertice
NCO grid generators store vertices in order (0,1,2,3)=(ul,ll,lr,ur)
NCO final latitude is in upper vertices (0,3) for S2N grids, lower vertices (1,2) for N2S grids
NCO final longitude is in RHS vertices (2,3) for S2N and N2S grids
Need generic algorithm to pick easternmost longitude for any of the four CCW orderings
What is ESMF vertice ordering? or does ESMF always copy from input grid?
Most grid generators probably start with ul or ll so vertice 2 is good choice for easternmost */
// lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-(mpf.dst_grid_corners-1L)]; // ESMF?
lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-2L]; // NCO lr
if(lon_ntf_out[lon_nbr_out-1] == lon_ntf_out[lon_nbr_out]) lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-1L]; // NCO ur
if(lon_ntf_out[lon_nbr_out-1] == lon_ntf_out[lon_nbr_out]) lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-3L]; // NCO ll
assert(lon_ntf_out[lon_nbr_out-1] != lon_ntf_out[lon_nbr_out]);
lon_spn=lon_ntf_out[lon_nbr_out]-lon_ntf_out[0L];
for(idx=0L;idx<lat_nbr_out;idx++) lat_ntf_out[idx]=lat_crn_out[mpf.dst_grid_corners*idx];
if(flg_s2n) lat_ntf_out[lat_nbr_out]=max_dbl(lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-1L],lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-2L]); else lat_ntf_out[lat_nbr_out]=min_dbl(lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-1L],lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-2L]);
assert(lat_ntf_out[lat_nbr_out] != lat_ntf_out[lat_nbr_out-1]);
lat_spn=fabs(lat_ntf_out[lat_nbr_out]-lat_ntf_out[0L]);
/* Place 1-D rectangular interfaces into 2-D coordinate boundaries */
for(idx=0L;idx<lon_nbr_out;idx++){
lon_bnd_out[2L*idx]=lon_ntf_out[idx];
lon_bnd_out[2L*idx+1L]=lon_ntf_out[idx+1L];
} /* !lon_nbr_out */
for(idx=0L;idx<lat_nbr_out;idx++){
lat_bnd_out[2L*idx]=lat_ntf_out[idx];
lat_bnd_out[2L*idx+1L]=lat_ntf_out[idx+1L];
} /* !lat_nbr_out */
if(nco_dbg_lvl_get() >= nco_dbg_crr){
for(idx=0L;idx<lon_nbr_out;idx++) (void)fprintf(stdout,"lon[%li] = [%g, %g, %g]\n",idx,lon_bnd_out[2L*idx],lon_ctr_out[idx],lon_bnd_out[2L*idx+1L]);
for(idx=0L;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li] = [%g, %g, %g]\n",idx,lat_bnd_out[2L*idx],lat_ctr_out[idx],lat_bnd_out[2L*idx+1L]);
} /* endif dbg */
/* Global or regional grid? */
nco_grd_xtn_enm nco_grd_xtn; /* [enm] Extent of grid */
if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn;
/* Diagnose type of latitude output grid by testing second latitude center against formulae */
double lat_ctr_tst_eqa;
double lat_ctr_tst_fv;
if(flg_s2n) lat_ctr_tst_eqa=lat_ntf_out[0L]+lat_spn*1.5/lat_nbr_out; else lat_ctr_tst_eqa=lat_ntf_out[0L]-lat_spn*1.5/lat_nbr_out;
if(flg_s2n) lat_ctr_tst_fv=lat_ntf_out[0L]+lat_spn/(lat_nbr_out-1L); else lat_ctr_tst_fv=lat_ntf_out[0L]-lat_spn/(lat_nbr_out-1L);
double lat_ctr_tst_gss;
/* In diagnosing grids, agreement to slightly worse than single-precision is "good enough for government work"
Hence some comparisons cast from double to float before comparison
20150526: T42 grid from SCRIP and related maps, and NCL-generated Gaussian grids for CESM, are accurate to at most ~eight digits
20150611: map_ne120np4_to_fv801x1600_bilin.150418.nc has yc_b[1600]=-89.775000006 not expected exact value lat_ctr[1]=-89.775000000000006
20170521: T62 grid from NCEP-NCAR Reanalysis 1 is worse than single precision, has yc_[192]=-86.6531 not expected exact value lat_ctr[1]=-86.6532 */
if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_eqa) nco_grd_lat_typ=nco_grd_lat_eqa;
if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_fv) nco_grd_lat_typ=nco_grd_lat_fv;
double *wgt_Gss_out=NULL; // [frc] Gaussian weights double precision
if(nco_grd_lat_typ == nco_grd_lat_nil){
/* Check for Gaussian grid */
double *lat_sin_out; // [frc] Sine of Gaussian latitudes double precision
lat_sin_out=(double *)nco_malloc(lat_nbr_out*sizeof(double));
wgt_Gss_out=(double *)nco_malloc(lat_nbr_out*sizeof(double));
(void)nco_lat_wgt_gss(lat_nbr_out,flg_s2n,lat_sin_out,wgt_Gss_out);
lat_ctr_tst_gss=rdn2dgr*asin(lat_sin_out[1L]);
/* Gaussian weights on output grid will be double-precision accurate
Grid itself is kept as user-specified so area diagnosed by ESMF_RegridWeightGen may be slightly inconsistent with weights */
if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stderr,"%s: INFO %s reports lat_ctr_out[1] = %g, lat_ctr_tst_gss = %g\n",nco_prg_nm_get(),fnc_nm,lat_ctr_out[1L],lat_ctr_tst_gss);
if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_gss) nco_grd_lat_typ=nco_grd_lat_gss;
if(lat_sin_out) lat_sin_out=(double *)nco_free(lat_sin_out);
} /* !Gaussian */
if(nco_grd_lat_typ == nco_grd_lat_nil){
/* If still of unknown type, this 2D grid may be weird
This occurs, e.g., with POP3 destination grid
Change gridtype from nil (which means not-yet-set) to unknown (which means none of the others matched) */
nco_grd_lat_typ=nco_grd_lat_unk;
} /* !nil */
/* Currently grd_lat_typ and grd_2D_typ are equivalent, though that may be relaxed in future */
if(nco_grd_lat_typ == nco_grd_lat_unk) nco_grd_2D_typ=nco_grd_2D_unk;
else if(nco_grd_lat_typ == nco_grd_lat_gss) nco_grd_2D_typ=nco_grd_2D_gss;
else if(nco_grd_lat_typ == nco_grd_lat_fv) nco_grd_2D_typ=nco_grd_2D_fv;
else if(nco_grd_lat_typ == nco_grd_lat_eqa) nco_grd_2D_typ=nco_grd_2D_eqa;
else assert(False);
if(nco_grd_lon_typ == nco_grd_lon_nil){
/* NB: Longitude grid diagnosis is susceptible to mistakes when input mapfile embeds common faulty grids, e.g., ACME *150418* FV maps
map_ne30np4_to_fv129x256_aave.150418.nc is diagnosed as regional grid of unknown type because of input grid flaws
map_ne30np4_to_fv129x256_aave.20150901.nc is (correctly) diagnosed as global grid of with lon_Grn_ctr */
if( (float)lon_ctr_out[0L] == 0.0f && (float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_Grn_ctr;
else if((float)lon_ctr_out[0L] == -180.0f && (float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_180_ctr;
else if((float)lon_ntf_out[0L] == 0.0f && (float)lon_ntf_out[1L] == (float)(lon_ntf_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_Grn_wst;
else if((float)lon_ntf_out[0L] == -180.0f && (float)lon_ntf_out[1L] == (float)(lon_ntf_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_180_wst;
else if((float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_bb;
else nco_grd_lon_typ=nco_grd_lon_unk;
} /* !nco_grd_lon_typ */
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output latitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lat_sng(nco_grd_lat_typ));
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output longitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lon_sng(nco_grd_lon_typ));
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output grid-extent: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_xtn_sng(nco_grd_xtn));
if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){
slat_ctr_out=(double *)nco_malloc(slat_nbr_out*nco_typ_lng(crd_typ_out));
slat_wgt_out=(double *)nco_malloc(slat_nbr_out*nco_typ_lng(crd_typ_out));
slon_ctr_out=(double *)nco_malloc(slon_nbr_out*nco_typ_lng(crd_typ_out));
for(idx=0L;idx<slat_nbr_out;idx++){
slat_ctr_out[idx]=lat_ntf_out[idx+1L];
slat_wgt_out[idx]=fabs(sin(dgr2rdn*lat_ctr_out[idx+1L])-sin(dgr2rdn*lat_ctr_out[idx])); /* fabs() ensures positive area in n2s grids */
} /* !lat_nbr_out */
for(idx=0L;idx<slon_nbr_out;idx++){
slon_ctr_out[idx]=lon_ntf_out[idx];
} /* !lat_nbr_out */
} /* !nco_grd_lat_fv */
switch(nco_grd_lat_typ){
case nco_grd_lat_eqa:
case nco_grd_lat_fv:
for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=fabs(sin(dgr2rdn*lat_bnd_out[2*idx+1L])-sin(dgr2rdn*lat_bnd_out[2*idx])); /* fabs() ensures positive area in n2s grids */
break;
case nco_grd_lat_gss:
for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=wgt_Gss_out[idx];
if(wgt_Gss_out) wgt_Gss_out=(double *)nco_free(wgt_Gss_out);
break;
case nco_grd_lat_unk:
for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=0.0;
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING %s reports unknown output latitude grid-type. Unable to guess what latitude weights should be.\n",nco_prg_nm_get(),fnc_nm);
break;
default:
nco_dfl_case_generic_err(); break;
} /* end nco_grd_lat_typ switch */
/* Fuzzy test of latitude weight normalization */
lat_wgt_ttl=0.0;
for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_ttl+=lat_wgt_out[idx];
if(nco_grd_lat_typ == nco_grd_lat_eqa || nco_grd_lat_typ == nco_grd_lat_fv){
double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */
lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd_out[2L*(lat_nbr_out-1L)+1L])-sin(dgr2rdn*lat_bnd_out[0L])); /* fabs() ensures positive area in n2s grids */
assert(fabs(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc) < eps_rlt);
if(lat_wgt_ttl_xpc < 0.0) abort(); /* CEWI Use lat_wgt_ttl_xpc at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */
} /* !nco_grd_lat_eqa, !nco_grd_lat_fv */
} /* !flg_grd_out_rct */
/* When possible, ensure area_out is non-zero
20150722: ESMF documentation says "The grid area array is only output when the conservative remapping option is used"
Actually, ESMF does (always?) output area, but area == 0.0 unless conservative remapping is used
20150721: ESMF bilinear interpolation map ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.150418.nc has area == 0.0
20150710: Tempest regionally refined grids like bilinearly interpolated CONUS for ACME RRM has area_out == 0
20150821: ESMF always outputs area_out == 0.0 for bilinear interpolation
Check whether NCO must diagnose and provide its own area_out */
/* If area_out contains any zero... */
for(idx=0;idx<(long)grd_sz_out;idx++)
if(area_out[idx] == 0.0) break;
if(idx != (long)grd_sz_out){
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Output grid detected with zero-valued output area(s) at idx = %ld (and likely others, too).\n",nco_prg_nm_get(),idx);
} /* !zero */
for(idx=0;idx<(long)grd_sz_out;idx++)
if(area_out[idx] != 0.0) break;
if(idx == (long)grd_sz_out){
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports area_out from mapfile is everywhere zero. This is expected for bilinearly interpolated output maps produced by ESMF_RegridWeightGen. ",nco_prg_nm_get(),fnc_nm);
if(flg_grd_out_2D && flg_grd_out_rct && (bnd_nbr_out == 2 || bnd_nbr_out == 4)){
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses quadrilateral area for rectangular output grids from a formula that assumes that cell boundaries follow arcs of constant latitude and longitude. This differs from the area of cells with boundaries that follow great circle arcs (used by, e.g., ESMF_RegridWeightGen and TempestRemap). Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm);
flg_dgn_area_out=True;
}else if(flg_grd_out_2D && flg_grd_out_crv && (bnd_nbr_out == 2 || bnd_nbr_out == 4)){
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses quadrilateral area for curvilinear output grids from formulae that assume that cell boundaries follow great circle arcs (as do, e.g., ESMF_RegridWeightGen and TempestRemap). This differs from the area of cells with boundaries that follow lines of constant latitude or longitude. Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm);
flg_dgn_area_out=True;
}else if(flg_grd_out_1D && flg_bnd_1D_usable){
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable name \"%s\") from the destination gridcell boundaries. NCO diagnoses spherical polygon area for unstructured output grids from formulae that assume that cell boundaries follow great circle arcs (as do, e.g., ESMFRegridWeightGen and TempestRemap). This differs from the area of cells with boundaries that follow lines of constant latitude or longitude. Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm);
flg_dgn_area_out=True;
}else{ /* !1D */
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"However, NCO cannot find enough boundary information, or it is too stupid about spherical trigonometry, to diagnose area_out. NCO will output an area variable (named \"%s\") copied from the input mapfile. This area will be everywhere zero.\n",rgr->area_nm);
} /* !2D */
} /* !area */
if(flg_dgn_area_out){
if(flg_grd_out_1D && flg_bnd_1D_usable){
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"INFO: Diagnosing area_out for 1D grid\n");
/* Area of unstructured grids requires spherical trigonometry */
nco_sph_plg_area(rgr,lat_bnd_out,lon_bnd_out,col_nbr_out,bnd_nbr_out,area_out);
} /* !1D */
if(flg_grd_out_crv){
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"INFO: Diagnosing area_out for curvilinear grid\n");
/* Area of curvilinear grids requires spherical trigonometry */
nco_sph_plg_area(rgr,lat_crn_out,lon_crn_out,grd_sz_out,bnd_nbr_out,area_out);
} /* !flg_grd_out_crv */
if(flg_grd_out_rct && nco_grd_2D_typ != nco_grd_2D_unk){
/* Mr. Enenstein and George O. Abell taught me the area of spherical zones
Spherical zone area is exact and faithful to underlying rectangular equi-angular grid
However, ESMF and Tempest approximate spherical polygons as connected by great circle arcs
fxm: Distinguish spherical zone shapes (e.g., equi-angular) from great circle arcs (e.g., unstructured polygons) */
for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++)
for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++)
area_out[lat_idx*lon_nbr_out+lon_idx]=fabs(dgr2rdn*(lon_bnd_out[2*lon_idx+1]-lon_bnd_out[2*lon_idx])*(sin(dgr2rdn*lat_bnd_out[2*lat_idx+1])-sin(dgr2rdn*lat_bnd_out[2*lat_idx]))); /* fabs() ensures positive area in n2s grids */
} /* !spherical zones */
} /* !flg_dgn_area_out */
if(rgr->tst == -1){
/* Passing --rgr tst=-1 causes regridder to fail here
This failure should cause host climo script to abort */
(void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports regridder instructed to fail here. This tests failure mode in climo scripts...\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* !tst */
/* Verify that frc_out is sometimes non-zero
ESMF: "The grid frac arrays (frac_a and frac_b) are calculated by ESMF_RegridWeightGen. For conservative remapping, the grid frac array returns the area fraction of the grid cell which participates in the remapping. For bilinear and patch remapping, the destination grid frac array [frac_b] is one where the grid point participates in the remapping and zero otherwise. For bilinear and patch remapping, the source grid frac array is always set to zero."
SCRIP: Similar to ESMF
For both ESMF+SCRIP frac_[ab] are computed by the weight-generation algorithm and are not specified as part of the input grids
How does an input ocean grid indicate that, say, half the gridcell is land and half ocean?
Does it use the area variable to tell the weight generation algorithm that a gridcell is fractional?
In other words does it use grid_imask=1 and grid_area=0.5*full_gridcell_area and, e.g., T=273.0? */
for(idx=0;idx<(long)grd_sz_out;idx++)
if(frc_out[idx] != 0.0) break;
if(idx == (long)grd_sz_out){
(void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports frc_out == frac_b contains all zeros\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* !always zero */
/* Test whether frc_out is ever zero... */
for(idx=0;idx<(long)grd_sz_out;idx++)
if(frc_out[idx] == 0.0) break;
if(nco_dbg_lvl_get() >= nco_dbg_std)
if(idx != (long)grd_sz_out)
(void)fprintf(stdout,"%s: INFO %s reports frc_out == frac_b contains zero-elements (e.g., at 1D idx = %ld)\n",nco_prg_nm_get(),fnc_nm,idx);
/* Normalizing by frc_out is redundant iff frc_out == 1.0, so we can save time without sacrificing accuracy
However, frc_out is often (e.g., for CS <-> RLL maps) close but not equal to unity (ESMF_RegridWeightGen issue?)
Hence, decide whether to normalize by frc_out by diagnosing the furthest excursion of frc_out from unity */
nco_bool flg_frc_out_one=True; /* [flg] Destination gridcell fraction frc_out == frac_b is in [1-epsilon,frc_out,1+epsilon] */
nco_bool flg_frc_out_wrt=False; /* [flg] Write destination gridcell fraction frc_out == frac_b to regridded files */
double frc_out_dff_one; /* [frc] Deviation of frc_out from 1.0 */
double frc_out_dff_one_max=0.0; /* [frc] Maximum deviation of frc_out from 1.0 */
long idx_max_dvn; /* [idx] Index of maximum deviation from 1.0 */
for(idx=0;idx<(long)grd_sz_out;idx++){
frc_out_dff_one=fabs(frc_out[idx]-1.0);
if(frc_out_dff_one > frc_out_dff_one_max){
frc_out_dff_one_max=frc_out_dff_one;
idx_max_dvn=idx;
} /* !max */
} /* !idx */
if(frc_out_dff_one_max > eps_rlt) flg_frc_out_one=False;
nco_bool flg_frc_nrm=False; /* [flg] Must normalize by frc_out == frac_b because frc_out is not always unity and specified normalization is destarea or none */
if(!flg_frc_out_one && /* If fraction is sometimes "far" from 1.0 and ... */
((nco_rgr_mpf_typ == nco_rgr_mpf_ESMF && nco_rgr_mth_typ == nco_rgr_mth_conservative && (nco_rgr_nrm_typ == nco_rgr_nrm_destarea || nco_rgr_nrm_typ == nco_rgr_nrm_none)) || /* ESMF map-file specifies conservative regridding with "destarea" or "none" or ... */
(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF)) /* 20191003: Weight-generator does not adhere to ESMF "normalization type" convention */
&& True){
flg_frc_nrm=True;
/* Avoid writing frc_out unless discrepancies are particularly egregious
       Otherwise would write frc_out for standard remaps like ne30->fv129x256 for which eps=2.46e-13 */
double eps_rlt_wrt_thr=3.0e-13;
/* 20181104: Never write frac_b for CMIP6! */
/* if(frc_out_dff_one_max > eps_rlt_wrt_thr) flg_frc_out_wrt=True; */
if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s reports global metadata specifies conservative remapping with normalization of type = %s. Furthermore, destination fractions frc_dst = dst_frac = frac_b = frc_out contain non-unity elements (maximum deviation from unity of %g exceeds hard-coded (in variable eps_rlt) relative-epsilon threshold of %g for frc_out[%ld] = %g). Thus normalization issues will be explicitly treated. Will apply \'destarea\' normalization (i.e., divide by non-zero frc_out[dst_idx]) to all regridded arrays.\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ),frc_out_dff_one_max,eps_rlt,idx_max_dvn,frc_out[idx_max_dvn]);
if(nco_dbg_lvl_get() >= nco_dbg_std && flg_frc_out_wrt) (void)fprintf(stdout,"%s: INFO %s Maximum deviation %g exceeds threshold of %g that triggers automatic writing of fractional destination area as variable named frac_b in regridded output.\n",nco_prg_nm_get(),fnc_nm,frc_out_dff_one_max,eps_rlt_wrt_thr);
} /* !sometimes non-unity */
if(nco_dbg_lvl_get() >= nco_dbg_std && flg_frc_nrm && rgr->flg_rnr){
// 20190918: Weaken from WARNING to INFO because NCO no longer renormalizes when using "destarea" maps unless specifically requested to with --rnr_thr
(void)fprintf(stdout,"%s: INFO %s reports manual request to renormalize partially overlapped destination gridcells (i.e., gridcells with non-unity frc_dst = dst_frac = frac_b) to preserve mean-value of valid fraction of source gridcells (usually most useful for state variables), rather than dilute valid-fraction mean over total destination gridcell area to preserve area-integral of source data (the default, often most useful for ensuring global conservation of fluxes).\n",nco_prg_nm_get(),fnc_nm);
//(void)fprintf(stdout,"%s: INFO %s reports manual request (with --rnr) to renormalize fields with non-unity frc_dst = dst_frac = frac_b at same time global metadata specifies normalization type = %s. Normalizing twice can be an error, depending on intent of each. Charlie is all ears on how NCO should handle this :)\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ));
//nco_exit(EXIT_FAILURE);
} /* !flg_rnr */
/* Detailed summary of 2D grids now available including quality-checked coordinates and area */
if(flg_grd_out_2D && nco_dbg_lvl_get() >= nco_dbg_sbr){
lat_wgt_ttl=0.0;
area_out_ttl=0.0;
if(flg_grd_out_rct){
(void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm);
for(idx=0;idx<lat_nbr_out;idx++)
lat_wgt_ttl+=lat_wgt_out[idx];
} /* !flg_grd_out_rct */
for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++)
for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++)
area_out_ttl+=area_out[lat_idx*lon_nbr_out+lon_idx];
(void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_out_ttl,area_out_ttl/(4.0*M_PI));
if(flg_grd_out_rct){
for(idx=0;idx<lon_nbr_out;idx++) (void)fprintf(stdout,"lon[%li] = [%g, %g, %g]\n",idx,lon_bnd_out[2*idx],lon_ctr_out[idx],lon_bnd_out[2*idx+1]);
for(idx=0;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li] = [%g, %g, %g]\n",idx,lat_bnd_out[2*idx],lat_ctr_out[idx],lat_bnd_out[2*idx+1]);
for(idx=0;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li], wgt[%li] = %20.15f, %20.15f\n",idx,idx,lat_ctr_out[idx],lat_wgt_out[idx]);
} /* !flg_grd_out_rct */
if(nco_dbg_lvl_get() > nco_dbg_crr)
for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++)
for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++)
(void)fprintf(stdout,"lat[%li] = %g, lon[%li] = %g, area[%li,%li] = %g\n",lat_idx,lat_ctr_out[lat_idx],lon_idx,lon_ctr_out[lon_idx],lat_idx,lon_idx,area_out[lat_idx*lon_nbr_out+lon_idx]);
assert(area_out_ttl > 0.0);
assert(area_out_ttl <= 4.0*M_PI + 5.0e-15);
} /* !flg_grd_out_2D && !dbg */
/* Allocate space for and obtain weights and addresses */
wgt_raw=(double *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_DOUBLE),fnc_nm,"Unable to malloc() value buffer for remapping weights");
col_src_adr=(int *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() value buffer for remapping addresses");
row_dst_adr=(int *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() value buffer for remapping addresses");
/* Obtain remap matrix addresses and weights from map file */
dmn_srt[0]=0L;
dmn_cnt[0]=mpf.num_links;
rcd=nco_get_vara(in_id,col_src_adr_id,dmn_srt,dmn_cnt,col_src_adr,NC_INT);
rcd=nco_get_vara(in_id,row_dst_adr_id,dmn_srt,dmn_cnt,row_dst_adr,NC_INT);
dmn_srt[0]=0L;
dmn_cnt[0]=mpf.num_links;
if(nco_rgr_mpf_typ != nco_rgr_mpf_SCRIP){
rcd=nco_get_vara(in_id,wgt_raw_id,dmn_srt,dmn_cnt,wgt_raw,NC_DOUBLE);
}else{
/* SCRIP mapfiles store 2D weight array remap_matrix[num_links,num_wgts]
Apply only first weight for first-order conservative accuracy (i.e., area overlap)
Apply all three weights for second-order conservative accuracy (by including gradients from centroid to vertices) */
dmn_srd[0]=1L;
dmn_srt[1]=0L;
dmn_cnt[1]=1L;
dmn_srd[1]=mpf.num_wgts;
rcd=nco_get_vars(in_id,wgt_raw_id,dmn_srt,dmn_cnt,dmn_srd,wgt_raw,NC_DOUBLE);
} /* !SCRIP */
/* Pre-subtract one from row/column addresses (stored, by convention, as Fortran indices) to optimize later access with C indices */
size_t lnk_nbr; /* [nbr] Number of links */
size_t lnk_idx; /* [idx] Link index */
lnk_nbr=mpf.num_links;
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) row_dst_adr[lnk_idx]--;
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) col_src_adr[lnk_idx]--;
if(nco_dbg_lvl_get() >= nco_dbg_io){
(void)fprintf(stdout,"idx row_dst col_src wgt_raw\n");
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) (void)fprintf(stdout,"%li %d %d %g\n",lnk_idx,row_dst_adr[lnk_idx],col_src_adr[lnk_idx],wgt_raw[lnk_idx]);
} /* endif dbg */
/* Free memory associated with input file */
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
if(dmn_cnt) dmn_cnt=(long *)nco_free(dmn_cnt);
if(dmn_srd) dmn_srd=(long *)nco_free(dmn_srd);
/* Close input netCDF file */
nco_close(in_id);
/* Remove local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in);
/* Above this line, fl_in and in_id refer to map file
Below this line, fl_in and in_id refer to input file to be regridded */
/* Initialize */
in_id=rgr->in_id;
out_id=rgr->out_id;
/* Sanity check that input data file matches expectations from mapfile */
char *col_nm_in=rgr->col_nm_in; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */
char *lat_nm_in=rgr->lat_nm_in; /* [sng] Name of input dimension to recognize as latitude */
char *lon_nm_in=rgr->lon_nm_in; /* [sng] Name of input dimension to recognize as longitude */
int dmn_id_col=NC_MIN_INT; /* [id] Dimension ID */
int dmn_id_lat; /* [id] Dimension ID */
int dmn_id_lon; /* [id] Dimension ID */
/* 20160503 Discover coordinates via CF Convention if indicated
This copies method used in nco_grd_nfr() */
/* Begin CF-coordinates block */
cf_crd_sct *cf=NULL;
char *rgr_var; /* [sng] Variable for special regridding treatment */
nco_bool flg_cf=False; /* [flg] Follow CF Coordinates convention to find and infer grid */
rgr_var=rgr->var_nm;
if(rgr_var){
/* Infer grid from special variable
Intended to be variable that has both horizontal dimensions and "coordinates" attribute, e.g.,
ncks --cdl -m ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc | grep coordinates
4LFTX_221_SPDY_S113:coordinates = "gridlat_221 gridlon_221" ;
Usage:
ncks -O -D 3 --rgr infer --rgr_var=ALBDO_221_SFC_S113 --rgr grid=${HOME}/grd_narr.nc ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc ~/foo.nc */
char crd_sng[]="coordinates"; /* CF-standard coordinates attribute name */
cf=(cf_crd_sct *)nco_malloc(sizeof(cf_crd_sct));
cf->crd=False; /* [flg] CF coordinates information is complete */
cf->crd_id[0]=NC_MIN_INT; /* [id] Coordinate ID, first */
cf->crd_id[1]=NC_MIN_INT; /* [id] Coordinate ID, second */
cf->crd_nm[0]=NULL; /* [sng] Coordinate name, first */
cf->crd_nm[1]=NULL; /* [sng] Coordinate name, second */
cf->crd_sng=NULL; /* [sng] Coordinates attribute value */
cf->dmn_id[0]=NC_MIN_INT; /* [id] Dimension ID, first */
cf->dmn_id[1]=NC_MIN_INT; /* [id] Dimension ID, second */
cf->dmn_nm[0]=NULL; /* [sng] Dimension name, first */
cf->dmn_nm[1]=NULL; /* [sng] Dimension name, second */
cf->unt_sng[0]=NULL; /* [sng] Units string, first coordinate */
cf->unt_sng[1]=NULL; /* [sng] Units string, second coordinate */
cf->var_id=NC_MIN_INT; /* [id] Coordinate variable ID */
cf->var_nm=NULL; /* [sng] Coordinates variable name */
cf->var_type=NC_NAT; /* [enm] Coordinates variable type */
if((rcd=nco_inq_varid_flg(in_id,rgr_var,&cf->var_id)) != NC_NOERR){
(void)fprintf(stderr,"%s: WARNING %s reports special \"coordinates\" variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var);
goto skp_cf;
} /* !rcd */
cf->crd_sng=nco_char_att_get(in_id,cf->var_id,crd_sng);
if(cf->crd_sng){
cf->crd=True;
}else{ /* !rcd && att_typ */
(void)fprintf(stderr,"%s: WARNING %s reports coordinates variable %s does not have character-valued \"coordinates\" attribute. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var);
goto skp_cf;
} /* !rcd && att_typ */
/* Valid coordinates attribute requires two coordinate names separated by space character */
char *crd_nm[NCO_MAX_CRD_PER_VAR]; /* [sng] Coordinate name start position */
char *crd_dpl; /* [sng] Modifiable duplicate of coordinates string */
char *spc_ptr; /* [sng] Pointer to space character (' ') */
int crd_nbr=0; /* [nbr] Number of names in coordinates attribute */
int crd_spt=0; /* [nbr] Number of "spatial-like" (that include "degree" in units) coordinates */
int crd_idx=0; /* [idx] Counter for coordinate names */
for(crd_idx=0;crd_idx<NCO_MAX_CRD_PER_VAR;crd_idx++) crd_nm[crd_idx]=NULL;
crd_dpl=(char *)strdup(cf->crd_sng);
/* Search for spaces starting from end of string */
while((spc_ptr=strrchr(crd_dpl,' '))){
crd_nm[crd_nbr]=spc_ptr+1L;
crd_nbr++;
/* NUL-terminate so next search ends here */
*spc_ptr='\0';
} /* !sbs_ptr */
/* Final coordinate name begins where coordinate string starts */
crd_nm[crd_nbr]=crd_dpl;
/* Change crd_nbr from 0-based index to actual coordinate number */
crd_nbr++;
if(crd_nbr < 2){
(void)fprintf(stderr,"%s: WARNING %s found only %d coordinate(s) in \"coordinates\" attribute \"%s\", at least two are required. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,crd_nbr,cf->crd_sng);
goto skp_cf;
} /* !crd_nbr */
/* If more than two coordinate names are present, choose first two (searching backwards from end) with "degree" in units attributes, otherwise just choose first two */
crd_idx=crd_spt=0;
while(crd_spt < 2 && crd_idx < crd_nbr){
cf->crd_nm[crd_spt]=crd_nm[crd_idx];
if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[crd_spt],&cf->crd_id[crd_spt])) == NC_NOERR){
cf->unt_sng[crd_spt]=nco_char_att_get(in_id,cf->crd_id[crd_spt],unt_sng);
if(cf->unt_sng[crd_spt]){
if(strcasestr(cf->unt_sng[crd_spt],"degree")){
/* Increment count of spatial-like coordinates... */
crd_spt++;
}else{
/* ...or free() memory allocated during search */
cf->unt_sng[crd_spt]=(char *)nco_free(cf->unt_sng[crd_spt]);
} /* !strcasestr() */
crd_idx++;
} /* !rcd && att_typ */
} /* !rcd */
} /* !crd_spt */
/* If while()-loop above was successful, our search is over
Otherwise, use first two coordinate names regardless of units, and print more diagnostics */
if(crd_spt < 2){
cf->crd_nm[0]=crd_nm[0];
cf->crd_nm[1]=crd_nm[1];
if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[0],&cf->crd_id[0])) != NC_NOERR){
(void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s not found. Turning-off CF coordinates search for this file.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0]);
goto skp_cf;
} /* !rcd */
if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[1],&cf->crd_id[1])) != NC_NOERR){
(void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s not found. Turning-off CF coordinates search for this file.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1]);
goto skp_cf;
} /* !rcd */
cf->unt_sng[0]=nco_char_att_get(in_id,cf->crd_id[0],unt_sng);
if(cf->unt_sng[0]){
if(!strcasestr(cf->unt_sng[0],"degrees_")) (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->unt_sng[0]);
} /* !rcd && att_typ */
cf->unt_sng[1]=nco_char_att_get(in_id,cf->crd_id[1],unt_sng);
if(cf->unt_sng[1]){
if(!strcasestr(cf->unt_sng[1],"degrees_")) (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1],cf->unt_sng[1]);
} /* !rcd && att_typ */
} /* !crd_spt */
int crd_rnk; /* [nbr] Coordinate rank */
rcd=nco_inq_varndims(in_id,cf->crd_id[0],&crd_rnk);
if(crd_rnk != 2){
(void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s has %i dimension(s). Skipping CF coordinates method.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],crd_rnk);
goto skp_cf;
} /* !crd_rnk */
rcd=nco_inq_vardimid(in_id,cf->crd_id[0],cf->dmn_id);
cf->dmn_nm[0]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR));
cf->dmn_nm[1]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR));
rcd=nco_inq_dimname(in_id,cf->dmn_id[0],cf->dmn_nm[0]);
rcd=nco_inq_dimname(in_id,cf->dmn_id[1],cf->dmn_nm[1]);
/* "coordinates" convention does not guarantee lat, lon are specified in that order
Use "units" values, if any, to determine order
In absence of "units", assume order is lat, lon */
nco_bool crd0_is_lat=False; /* [flg] First coordinate is latitude */
nco_bool crd0_is_lon=False; /* [flg] First coordinate is longitude */
nco_bool crd1_is_lat=False; /* [flg] Second coordinate is latitude */
nco_bool crd1_is_lon=False; /* [flg] Second coordinate is longitude */
if(cf->unt_sng[0]){
if(!strcasecmp(cf->unt_sng[0],"degrees_north")) crd0_is_lat=True;
if(!strcasecmp(cf->unt_sng[0],"degrees_east")) crd0_is_lon=True;
} /* endif */
if(cf->unt_sng[1]){
if(!strcasecmp(cf->unt_sng[1],"degrees_north")) crd1_is_lat=True;
if(!strcasecmp(cf->unt_sng[1],"degrees_east")) crd1_is_lon=True;
} /* endif */
assert((crd0_is_lat && crd1_is_lon) || (crd0_is_lon && crd1_is_lat));
int idx_lat;
int idx_lon;
if(crd0_is_lat && crd1_is_lon){
idx_lat=0;
idx_lon=1;
}else{
idx_lat=1;
idx_lon=0;
} /* endif */
/* Dimensions and coordinates have been vetted. Store as primary lookup names.
Dimensions are always returned in order [LRV,MRV]=[0,1]
LRV is along-track direction, and MRV is across-track (at least in NASA data)
Internally we label LRV as "lat" and MRV as "lon" so that code looks similar for curvilinear and rectangular grids */
dmn_id_lat=cf->dmn_id[0];
dmn_id_lon=cf->dmn_id[1];
/* Subtlety: lat_nm_in is coordinate (variable+dimension) name when specified from command-line (as in nco_grd_nfr()), dimension name when found through CF-method (as in nco_rgr_wgt()). This confusing distinction could be avoided by passing command-line dimension names through-to nco_rgr_wgt(). However, that route would require complex priorities for what to do when passing command-line coordinate names not dimension names and visa-versa. */
lat_nm_in=strdup(cf->dmn_nm[0]);
lon_nm_in=strdup(cf->dmn_nm[1]);
//lat_nm_in=strdup(cf->crd_nm[idx_lat]);
//lon_nm_in=strdup(cf->crd_nm[idx_lon]);
/* Next four lines unnecessary in nco_rgr_wgt() which only needs dimension names (it reads input coordinates from map-file not data-file) */
//lat_ctr_id=cf->crd_id[idx_lat];
//lon_ctr_id=cf->crd_id[idx_lon];
//lat_dmn_nm=strdup(cf->dmn_nm[0]);
//lon_dmn_nm=strdup(cf->dmn_nm[1]);
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s \"coordinates\" attribute \"%s\" points to coordinates %s and %s. Latitude coordinate \"%s\" has dimensions \"%s\" and \"%s\". Longitude coordinate \"%s\" has dimensions \"%s\" and \"%s\".\n",nco_prg_nm_get(),fnc_nm,rgr_var,cf->crd_sng,cf->crd_nm[0],cf->crd_nm[1],cf->crd_nm[idx_lat],cf->dmn_nm[idx_lat],cf->dmn_nm[idx_lon],cf->crd_nm[idx_lon],cf->dmn_nm[idx_lat],cf->dmn_nm[idx_lon]);
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s Coordinates %s and %s \"units\" values are \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->crd_nm[1],cf->unt_sng[0] ? cf->unt_sng[0] : "(non-existent)",cf->unt_sng[1] ? cf->unt_sng[1] : "(non-existent)");
/* Clean-up CF coordinates memory */
if(crd_dpl) crd_dpl=(char *)nco_free(crd_dpl);
if(cf->crd_sng) cf->crd_sng=(char *)nco_free(cf->crd_sng);
if(cf->dmn_nm[0]) cf->dmn_nm[0]=(char *)nco_free(cf->dmn_nm[0]);
if(cf->dmn_nm[1]) cf->dmn_nm[1]=(char *)nco_free(cf->dmn_nm[1]);
if(cf->unt_sng[0]) cf->unt_sng[0]=(char *)nco_free(cf->unt_sng[0]);
if(cf->unt_sng[1]) cf->unt_sng[1]=(char *)nco_free(cf->unt_sng[1]);
// if(foo) foo=(char *)nco_free(foo);
} /* !rgr_var */
/* goto skp_cf */
skp_cf:
/* free() any abandoned cf structure now */
if(!flg_cf)
if(cf) cf=(cf_crd_sct *)nco_free(cf);
rcd=NC_NOERR;
/* End CF-coordinates block */
if(flg_grd_in_1D){
long col_nbr_in_dat; /* [nbr] Number of columns in input datafile */
/* Check default or command-line option first, then search usual suspects, and if that fails then guess
unstructured dimension is dimension in input file with size n_a expected by input map file, suggested by PJCS
Using internal database names first ensures users can pick between multiple dimensions of size n_a
20180313: fxm New PJCS algorithm is superior, should eliminate internal database for unstructured grids?
Database is necessary for 2D grids because otherwise no good way to disambiguate latitude from longitude */
if(col_nm_in && (rcd=nco_inq_dimid_flg(in_id,col_nm_in,&dmn_id_col)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(in_id,"lndgrid",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */
else if((rcd=nco_inq_dimid_flg(in_id,"nCells",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("nCells"); /* MPAS-O/I */
else if((rcd=nco_inq_dimid_flg(in_id,"nEdges",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("nEdges"); /* MPAS-O/I */
else if((rcd=nco_inq_dimid_flg(in_id,"ncol_d",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("ncol_d"); /* EAM dynamics grid */
else if((rcd=nco_inq_dimid_flg(in_id,"ncol_p",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("ncol_d"); /* EAM physics grid */
else if((rcd=nco_inq_dimid_flg(in_id,"sounding_id",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("sounding_id"); /* OCO2 */
/* 20180605: Database matches to above names may be false-positives
ALM/CLM/CTSM/ELM store all possible dimension names that archived variables could use
NCO only prints dimensions used in variables, while ncdump prints all dimensions
From ncdump we find usually unused ALM/CLM/CTSM/ELM dimensions: gridcell, lndunit, column, pft, levurb, numrad, levsno
Check that matched dimension has expected size: */
if(dmn_id_col != NC_MIN_INT){
rcd=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr_in_dat);
if(col_nbr_in != col_nbr_in_dat){
dmn_id_col=NC_MIN_INT;
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s database-prioritized unstructured dimension candidate \"%s\" has size not expected by supplied map-file: mapfile col_nbr_in = %ld != %ld = col_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,col_nm_in,col_nbr_in,col_nbr_in_dat);
} /* !col_nbr_in */
}else{
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s expects data on an unstructured grid yet cannot find a dimension name that matches the usual suspects for unstructured dimensions (ncol, gridcell, lndgrid, nCells, nEdges, sounding_id). Consider specifying horizontal dimension name to ncks with \"--rgr col_nm=foo\" or to ncremap with \"ncremap -R '--rgr col_nm=foo'\", and consider requesting the NCO project to add this horizontal dimension name to its internal database.\n",nco_prg_nm_get(),fnc_nm);
} /* !dmn_id_col */
if(dmn_id_col == NC_MIN_INT){
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s Proceeding with fallback algorithm to guess unstructured dimension as first dimension in data file of equal size to that expected by supplied map-file...\n",nco_prg_nm_get(),fnc_nm);
/* 20180312: Unstructured dimension must have same size as input map file, suggested by PJCS */
int *dmn_ids_in; /* [nbr] Input file dimension IDs */
int dmn_nbr_in; /* [nbr] Number of dimensions in input file */
const int flg_prn=0; /* [enm] Parent flag */
rcd=nco_inq_dimids(in_id,&dmn_nbr_in,NULL,flg_prn);
dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int));
rcd=nco_inq_dimids(in_id,NULL,dmn_ids_in,flg_prn);
/* Find dimension, if any, with same size as map "a" src_grid_dims[0] = n_a dimension */
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
dmn_id_col=dmn_ids_in[dmn_idx];
rcd=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr_in_dat);
if(col_nbr_in == col_nbr_in_dat){
rcd=nco_inq_dimname(in_id,dmn_id_col,col_nm_in);
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s found that dimension %s in datafile has same size (n_a = %ld) expected by map-file. Assuming %s is the unstructured dimension.\n",nco_prg_nm_get(),fnc_nm,col_nm_in,col_nbr_in,col_nm_in);
break;
} /* !col_nbr_in */
} /* !dmn_idx */
if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in);
if(dmn_idx == dmn_nbr_in){
dmn_id_col=NC_MIN_INT;
(void)fprintf(stdout,"%s: WARNING received a map-file constructed to process data on an unstructured (one-dimensional) grid, but %s (aka \"the regridder\") cannot find a dimension in the input data file (or, with ncremap, a possibly already subsetted intermediate file) that matches the size of the unstructured dimension in the supplied map-file = src_grd_dims[0] = n_a = %ld.\nHINT: Ensure at least one member of the variable extraction list has a spatial dimension of size = %ld\n",nco_prg_nm_get(),fnc_nm,col_nbr_in,col_nbr_in);
(void)fprintf(stdout,"%s: INFO %s reports a third, last-ditch (aka \"Hail Mary\") workaround may work. The Hail-Mary allows logically 1D map-files to regrid logically 2D datasets, so long as the product of the horizontal dimension sizes in the 2D input data file equals the map-file 1D dimension size.\n",nco_prg_nm_get(),fnc_nm);
/* Hail Mary algorithm: Use following 2D input grid block to identify horizontal coordinates and dimensions */
flg_grd_in_1D_dat_in_2D=True;
flg_grd_in_2D=True;
//nco_exit(EXIT_FAILURE);
} /* !dmn_idx */
} /* !col_nm_in */
} /* !1D */
if(flg_grd_in_2D){
long lat_nbr_in_dat; /* [nbr] Number of latitudes in input datafile */
if(lat_nm_in && (rcd=nco_inq_dimid_flg(in_id,lat_nm_in,&dmn_id_lat)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(in_id,"latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("latitude");
else if((rcd=nco_inq_dimid_flg(in_id,"lat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("lat");
else if((rcd=nco_inq_dimid_flg(in_id,"Latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Latitude");
else if((rcd=nco_inq_dimid_flg(in_id,"Lat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Lat");
else if((rcd=nco_inq_dimid_flg(in_id,"south_north",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("south_north"); /* WRF */
else if((rcd=nco_inq_dimid_flg(in_id,"south_north_stag",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("south_north_stag");
else if((rcd=nco_inq_dimid_flg(in_id,"YDim:location",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("YDim:location"); /* AIRS L3 */
else if((rcd=nco_inq_dimid_flg(in_id,"YDim:MOD_Grid_monthly_CMG_VI",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("YDim:MOD_Grid_monthly_CMG_VI"); /* MODIS MOD13C2 */
else if((rcd=nco_inq_dimid_flg(in_id,"natrack",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("natrack"); /* MODIS DeepBlue SeaWiFS L2 */
else if((rcd=nco_inq_dimid_flg(in_id,"nj",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nj"); /* CICE RTM */
else if((rcd=nco_inq_dimid_flg(in_id,"lsmlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("lsmlat"); /* CISM/CLM/ELM */
else if((rcd=nco_inq_dimid_flg(in_id,"nlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nlat"); /* POP */
else if((rcd=nco_inq_dimid_flg(in_id,"rlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("rlat"); /* RACMO */
else if((rcd=nco_inq_dimid_flg(in_id,"nscan",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nscan"); /* AMSR, TRMM */
else if((rcd=nco_inq_dimid_flg(in_id,"nTimes",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nTimes"); /* OMI L2 */
else if((rcd=nco_inq_dimid_flg(in_id,"number_of_lines",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("number_of_lines"); /* DSCOVR L2 */
else if((rcd=nco_inq_dimid_flg(in_id,"GeoTrack",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("GeoTrack"); /* AIRS L2 DAP NC */
else if((rcd=nco_inq_dimid_flg(in_id,"GeoTrack:L2_Standard_atmospheric&surface_product",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("GeoTrack:L2_Standard_atmospheric&surface_product"); /* AIRS L2 HDF */
else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Along_Swath:mod04",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Cell_Along_Swath:mod04"); /* MODIS MOD04 L2 */
else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Along_Swath_mod04",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Cell_Along_Swath_mod04"); /* MODIS MOD04 L2 (ncl_convert2nc changes colon to underscore) */
else if((rcd=nco_inq_dimid_flg(in_id,"CO_Latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("CO_Latitude");
else if((rcd=nco_inq_dimid_flg(in_id,"j",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("j"); /* CMIP5 NorESM1 ocean */
else if((rcd=nco_inq_dimid_flg(in_id,"latitude0",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("latitude0"); /* Oxford */
else if((rcd=nco_inq_dimid_flg(in_id,"y",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("y"); /* NEMO */
else if((rcd=nco_inq_dimid_flg(in_id,"x",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("x"); /* NSIDC polar stereographic (NB: unfortunate incompatible conflict between NEMO & NSIDC names) */
else if((rcd=nco_inq_dimid_flg(in_id,"y1",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("y1"); /* NSIDC EASE */
else if((rcd=nco_inq_dimid_flg(in_id,"ygrid",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("ygrid"); /* SSM/I */
else if((rcd=nco_inq_dimid_flg(in_id,"ygrid_0",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("ygrid_0"); /* NWS HRRR */
else{
(void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports unable to find latitude dimension in input file. Tried the usual suspects. HINT: Inform regridder of input latitude dimension name with \"ncks --rgr lat_nm_in=name\" or \"ncremap -R '--rgr lat_nm_in=name'\"\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* !lat */
rcd=nco_inq_dimlen(in_id,dmn_id_lat,&lat_nbr_in_dat);
if(lat_nbr_in != lat_nbr_in_dat && !flg_grd_in_1D_dat_in_2D){
(void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports mapfile and data file dimension sizes disagree: mapfile lat_nbr_in = %ld != %ld = lat_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,lat_nbr_in,lat_nbr_in_dat);
nco_exit(EXIT_FAILURE);
} /* !err */
long lon_nbr_in_dat; /* [nbr] Number of longitudes in input datafile */
if(lon_nm_in && (rcd=nco_inq_dimid_flg(in_id,lon_nm_in,&dmn_id_lon)) == NC_NOERR) /* do nothing */;
else if((rcd=nco_inq_dimid_flg(in_id,"longitude",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("longitude");
else if((rcd=nco_inq_dimid_flg(in_id,"lon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("lon");
else if((rcd=nco_inq_dimid_flg(in_id,"Longitude",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Longitude");
else if((rcd=nco_inq_dimid_flg(in_id,"Lon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Lon");
else if((rcd=nco_inq_dimid_flg(in_id,"west_east",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("west_east"); /* WRF */
else if((rcd=nco_inq_dimid_flg(in_id,"west_east_stag",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("west_east_stag");
else if((rcd=nco_inq_dimid_flg(in_id,"XDim:location",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("XDim:location"); /* AIRS L3 */
else if((rcd=nco_inq_dimid_flg(in_id,"XDim:MOD_Grid_monthly_CMG_VI",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("XDim:MOD_Grid_monthly_CMG_VI"); /* MODIS MOD13C2 */
else if((rcd=nco_inq_dimid_flg(in_id,"ni",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("ni"); /* CICE RTM */
else if((rcd=nco_inq_dimid_flg(in_id,"lsmlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("lsmlon"); /* CISM/CLM/ELM */
else if((rcd=nco_inq_dimid_flg(in_id,"nlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nlon"); /* POP */
else if((rcd=nco_inq_dimid_flg(in_id,"rlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("rlon"); /* RACMO */
else if((rcd=nco_inq_dimid_flg(in_id,"npix",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("npix"); /* AMSR */
else if((rcd=nco_inq_dimid_flg(in_id,"npixel",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("npixel"); /* TRMM */
else if((rcd=nco_inq_dimid_flg(in_id,"nxtrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nxtrack"); /* MODIS DeepBlue SeaWiFS L2 */
else if((rcd=nco_inq_dimid_flg(in_id,"nXtrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nXtrack"); /* OMI L2 */
else if((rcd=nco_inq_dimid_flg(in_id,"number_of_pixels",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("number_of_pixels"); /* DSCOVR L2 */
else if((rcd=nco_inq_dimid_flg(in_id,"GeoXTrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("GeoXTrack"); /* AIRS L2 DAP NC */
else if((rcd=nco_inq_dimid_flg(in_id,"GeoXTrack:L2_Standard_atmospheric&surface_product",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("GeoXTrack:L2_Standard_atmospheric&surface_product"); /* AIRS L2 HDF */
else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Across_Swath:mod04",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Cell_Across_Swath:mod04"); /* MODIS MOD04 L2 */
else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Across_Swath_mod04",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Cell_Across_Swath_mod04"); /* MODIS MOD04 L2 (ncl_convert2nc changes colon to underscore) */
else if((rcd=nco_inq_dimid_flg(in_id,"i",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("i"); /* CMIP5 NorESM1 ocean */
else if((rcd=nco_inq_dimid_flg(in_id,"longitude0",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("longitude0"); /* Oxford */
else if((rcd=nco_inq_dimid_flg(in_id,"x",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("x"); /* NEMO */
else if((rcd=nco_inq_dimid_flg(in_id,"y",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("y"); /* NSIDC polar stereographic (NB: unfortunate incompatible conflict between NEMO & NSIDC names) */
else if((rcd=nco_inq_dimid_flg(in_id,"x1",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("x1"); /* NSIDC EASE */
else if((rcd=nco_inq_dimid_flg(in_id,"xgrid",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("xgrid"); /* SSM/I */
else if((rcd=nco_inq_dimid_flg(in_id,"xgrid_0",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("xgrid_0"); /* NWS HRRR */
else{
(void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports unable to find longitude dimension in input file. Tried the usual suspects. HINT: Inform regridder of input longitude dimension name with \"ncks --rgr lon_nm_in=name\" or \"ncremap -R '--rgr lon_nm_in=name'\"\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* !lon */
rcd=nco_inq_dimlen(in_id,dmn_id_lon,&lon_nbr_in_dat);
if(lon_nbr_in != lon_nbr_in_dat && !flg_grd_in_1D_dat_in_2D){
(void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports mapfile and data file dimension sizes disagree: mapfile lon_nbr_in = %ld != %ld = lon_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,lon_nbr_in,lon_nbr_in_dat);
nco_exit(EXIT_FAILURE);
} /* !err */
if(flg_grd_in_1D_dat_in_2D){
if(lon_nbr_in_dat*lat_nbr_in_dat == col_nbr_in){
(void)fprintf(stdout,"%s: INFO %s Hail Mary algorithm reports tentative success in that product of identifed horizontal dimension sizes in the 2D input data file equals the map-file 1D dimension size = %ld.\n",nco_prg_nm_get(),fnc_nm,col_nbr_in);
lat_nbr_in=lat_nbr_in_dat;
lon_nbr_in=lon_nbr_in_dat;
}else{ /* !col_nbr_in */
(void)fprintf(stdout,"%s: ERROR %s Hail Mary algorithm reports final failure since product of identifed horizontal dimension sizes in the 2D input data file does not equal the map-file 1D dimension size = %ld.\n",nco_prg_nm_get(),fnc_nm,col_nbr_in);
nco_exit(EXIT_FAILURE);
} /* !col_nbr_in */
} /* !flg_grd_in_1D_dat_in_2D */
} /* !2D */
/* Do not extract grid variables (that are also extensive variables) like lon, lat, area, and masks
If necessary, use remap data to diagnose them from scratch
Other extensive variables (like counts, population) will be extracted and summed not averaged */
/* Exception list source:
ALM/CLM: landmask (20170504: Debatable, including erroneous mask may be better than completely excluding an expected mask) (20170504: must keep landfrac since regridded by ncremap for SGS option)
AMSR: Latitude, Longitude
CAM, CERES, CMIP5: lat, lon
CAM, CMIP5: gw, lat_bnds, lon_bnds
CAM-FV: slon, slat, w_stag (w_stag is weights for slat grid, analogous to gw for lat grid)
CAM-SE, EAM, MOSART: area
CICE: latt_bounds, lont_bounds, latu_bounds, lonu_bounds, TLAT, TLON, ULAT, ULON (NB: CICE uses ?LON and POP uses ?LONG) (aice is ice area, tmask is state-variable mask, both not currently excluded, although all binary masks like tmask should be recomputed on new grid)
CISM/CLM/ELM: LATIXY, LONGXY (glacier mask files)
DSCOVR L2: latitude, longitude
ESMF: gridcell_area
GPM: S1_Latitude, S1_Longitude
HIRDLS: Latitude
MAR/RACMO: LAT, LON
MLS: CO_Latitude
MPAS-O/I/LI: areaCell, latCell, lonCell and others that are all handled by separated MPAS convention implementation below
NCO: lat_vertices, lon_vertices
NEMO: nav_lat, nav_lon
NWS HRRR: gridlat_0, gridlon_0
OCO2: latitude_bnds, longitude_bnds
OMI DOMINO: Latitude, LatitudeCornerpoints, Longitude, LongitudeCornerpoints
Oxford: global_latitude0, global_longitude0, latitude0, longitude0
POP: TLAT, TLONG, ULAT, ULONG (NB: CICE uses ?LON and POP uses ?LONG) (POP does not archive spatial bounds)
RACMO: rlat, rlon
TRMM: Latitude, Longitude
UV-CDAT regridder: bounds_lat, bounds_lon
Unknown: XLAT_M, XLONG_M
WRF: XLAT, XLONG */
const int var_xcl_lst_nbr=53; /* [nbr] Number of objects on exclusion list */
const char *var_xcl_lst[]={"/area","/gridcell_area","/gw","/LAT","/lat","/Latitude","/latitude","/nav_lat","/global_latitude0","gridlat_0","/latitude0","/rlat","/slat","/LATIXY","/LONGXY","/TLAT","/ULAT","/XLAT","/XLAT_M","/CO_Latitude","/S1_Latitude","/lat_bnds","/lat_vertices","/latt_bounds","/latu_bounds","/latitude_bnds","/LatitudeCornerpoints","/bounds_lat","/LON","/lon","/Longitude","/longitude","/nav_lon","/global_longitude0","gridlon_0","/longitude0","/rlon","/slon","/TLON","/TLONG","/ULON","/ULONG","/XLONG","/XLONG_M","/CO_Longitude","/S1_Longitude","/lon_bnds","/lon_vertices","/lont_bounds","/lonu_bounds","/longitude_bnds","/LongitudeCornerpoints","/bounds_lon","/w_stag"};
int var_cpy_nbr=0; /* [nbr] Number of copied variables */
int var_rgr_nbr=0; /* [nbr] Number of regridded variables */
int var_xcl_nbr=0; /* [nbr] Number of deleted variables */
int var_crt_nbr=0; /* [nbr] Number of created variables */
int var_xtn_nbr=0; /* [nbr] Number of extensive variables */
unsigned int idx_tbl; /* [idx] Counter for traversal table */
const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */
for(idx=0;idx<var_xcl_lst_nbr;idx++){
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++)
if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,var_xcl_lst[idx])) break;
if(idx_tbl < trv_nbr){
if(trv_tbl->lst[idx_tbl].flg_xtr){
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll);
var_xcl_nbr++;
} /* endif */
trv_tbl->lst[idx_tbl].flg_xtr=False;
} /* endif */
} /* !idx */
cnv_sct *cnv; /* [sct] Convention structure */
/* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */
cnv=nco_cnv_ini(in_id);
if(cnv->MPAS){
/* 20160228: MPAS has a host of mysterious grid and extensive variables that should probably not be regridded
20180206: Add from MPAS-LI xCell, yCell, zCell, and [xyz]Edge, and [xyz]Vertex
20180917: Restrict exclusion list to a subset of variables with nCells-dimension
Six nCells-variables may be valuable when regridded to lat/lon
mpas_xcl_lst in nco_rgr_wgt() and MPAS var_xcl_lst in nco_var_is_fix() differ by these six variables:
areaCell for comparison to area(lat,lon)
cellMask for area-weighted mask
maxLevelCell for area-weighted underwater topographic mask
xCell, yCell, zCell for area-weighted cartesian coordinates
20180918: Regridder currently only works on cell-based coordinates
Decided regridder will omit not copy fields on vertex- or edge-based coordinates until it can regrid them
Regridding vertex- or edge-based fields would require new sparse matrix for vertices or edges
How would ERWG or TempestRemap handle that?
MPAS geophysical variables on vertex-based (not cell-based) coordinates include:
avg_airStressVertexUGeo_1, avg_airStressVertexVGeo_1, uOceanVelocityVertexGeo_1, uVelocityGeo_1, vOceanVelocityVertexGeo_1, vVelocityGeo_1
MPAS geophysical variables on edge-based (not cell-based) coordinates include:
principalStress1Var_1, principalStress2Var_1 */
const int mpas_xcl_lst_nbr=35;
const char *mpas_xcl_lst[]={"/angleEdge","/areaTriangle","/cellsOnCell","/cellsOnEdge","/cellsOnVertex","/dcEdge","/dvEdge","/edgeMask","/edgesOnCell","/edgesOnEdge","/edgesOnVertex","/indexToCellID","/indexToEdgeID","/indexToVertexID","/kiteAreasOnVertex","/latCell","/latEdge","/latVertex","/lonCell","/lonEdge","/lonVertex","/maxLevelEdgeTop","/meshDensity","/nEdgesOnCell","/nEdgesOnEdge","/vertexMask","/verticesOnCell","/verticesOnEdge","/weightsOnEdge","/xEdge","/yEdge","/zEdge","/xVertex","/yVertex","/zVertex"};
for(idx=0;idx<mpas_xcl_lst_nbr;idx++){
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++)
if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,mpas_xcl_lst[idx])) break;
if(idx_tbl < trv_nbr){
if(trv_tbl->lst[idx_tbl].flg_xtr){
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined MPAS exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll);
var_xcl_nbr++;
} /* endif */
trv_tbl->lst[idx_tbl].flg_xtr=False;
} /* endif */
} /* !idx */
} /* !MPAS */
char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */
int dmn_nbr_in; /* [nbr] Number of dimensions in input variable */
int dmn_nbr_out; /* [nbr] Number of dimensions in output variable */
nco_bool has_lon; /* [flg] Contains longitude dimension */
nco_bool has_lat; /* [flg] Contains latitude dimension */
trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */
/* Define regridding flag for each variable */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv=trv_tbl->lst[idx_tbl];
dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn;
if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
has_lon=False;
has_lat=False;
if(flg_grd_in_2D){
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
/* Pre-determine flags necessary during next loop */
dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm;
/* fxm: Generalize to include any variable containing two coordinates with "standard_name" = "latitude" and "longitude" */
if(!has_lon) has_lon=!strcmp(dmn_nm_cp,lon_nm_in);
if(!has_lat) has_lat=!strcmp(dmn_nm_cp,lat_nm_in);
} /* end loop over dimensions */
} /* !flg_grd_in_2D */
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm;
/* Regrid variables containing the horizontal spatial dimension on 1D grids, and both latitude and longitude on 2D grids */
if(!strcmp(dmn_nm_cp,col_nm_in) || (has_lon && has_lat)){
trv_tbl->lst[idx_tbl].flg_rgr=True;
var_rgr_nbr++;
break;
} /* endif */
} /* end loop over dimensions */
if(dmn_idx == dmn_nbr_in){
/* Not regridded, so must be omitted or copied... */
if(flg_grd_in_2D && (has_lon || has_lat)){
/* Single spatial dimensional variables on 2D input grids are likely extensive (e.g., grd_mrd_lng from bds)
These could be salvaged with explicit rules or implicit assumptions */
trv_tbl->lst[idx_tbl].flg_xtr=False;
var_xcl_nbr++;
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) extensive-seeming (e.g., 1D spatial variable in 2D input grid, or 2D spatial variable without primary grid dimensions from multi-grid file (e.g., west_east_stag or south_north_stag instead of west_east or south_north)) variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll);
}else{ /* !omitted */
/* Copy all variables that are not regridded or omitted */
var_cpy_nbr++;
} /* !omitted */
} /* endif not regridded */
} /* end nco_obj_typ_var */
} /* end idx_tbl */
if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit regridding criteria. The regridder expects something to regrid, and variables not regridded are copied straight to output. HINT: If the name(s) of the input horizontal spatial dimensions to be regridded (e.g., latitude and longitude or column) do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"latitude\", \"longitude\", and \"ncol\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#regrid, e.g., \"ncks --rgr col=lndgrid --rgr lat=north\" or \"ncremap -R '--rgr col=lndgrid --rgr lat=north'\".\n",nco_prg_nm_get(),fnc_nm);
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv=trv_tbl->lst[idx_tbl];
if(trv.flg_rgr){
for(int xtn_idx=0;xtn_idx<rgr->xtn_nbr;xtn_idx++){
/* 20150927: Extensive variable treatments are still in alpha-development
Currently testing on AIRS TSurfStd_ct (by summing not averaging)
In future may consider variables that need more complex (non-summing) extensive treatment
MPAS-O/I has a zillion of these [xyz]Cell, cellsOnCell, fCell, indexToCellID, maxLevelCell, meshDensity
Not to mention the variables that depend on nEdges and nVertices... */
if(!strcmp(trv.nm,rgr->xtn_var[xtn_idx])){
trv_tbl->lst[idx_tbl].flg_xtn=True;
var_xtn_nbr++;
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO Variable %s will be treated as extensive (summed not averaged)\n",nco_prg_nm_get(),trv.nm_fll);
} /* !strcmp */
} /* !xtn_idx */
} /* !flg_rgr */
} /* !idx_tbl */
if(nco_dbg_lvl_get() >= nco_dbg_sbr){
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv=trv_tbl->lst[idx_tbl];
if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Regrid %s? %s\n",trv.nm,trv.flg_rgr ? "Yes" : "No");
} /* end idx_tbl */
} /* end dbg */
/* Lay-out regridded file */
aed_sct aed_mtd;
char *area_nm_out;
char *att_nm;
char *bnd_nm_out;
char *bnd_tm_nm_out;
char *col_nm_out;
char *frc_nm_out;
char *lat_bnd_nm_out;
char *lat_dmn_nm_out;
char *lat_nm_out;
char *lat_wgt_nm;
char *lon_bnd_nm_out;
char *lon_dmn_nm_out;
char *lon_nm_out;
char *msk_nm_out;
char *slat_nm_out=NULL;
char *slat_wgt_nm_out=NULL;
char *slon_nm_out=NULL;
int dmn_id_bnd; /* [id] Dimension ID */
int dmn_id_bnd_tm; /* [id] Dimension ID */
int dmn_id_slat; /* [id] Dimension ID */
int dmn_id_slon; /* [id] Dimension ID */
int area_out_id; /* [id] Variable ID for area */
int frc_out_id; /* [id] Variable ID for fraction */
int lon_out_id; /* [id] Variable ID for longitude */
int lat_out_id; /* [id] Variable ID for latitude */
int lat_wgt_id; /* [id] Variable ID for latitude weight */
int lon_bnd_id; /* [id] Variable ID for lon_bnds/lon_vertices */
int lat_bnd_id; /* [id] Variable ID for lat_bnds/lat_vertices */
int msk_out_id; /* [id] Variable ID for mask */
int slat_out_id; /* [id] Variable ID for staggered latitude */
int slat_wgt_id; /* [id] Variable ID for staggered latitude weight */
int slon_out_id; /* [id] Variable ID for staggered longitude */
int dmn_ids_out[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */
long dmn_srt_out[dmn_nbr_grd_max];
long dmn_cnt_tuo[dmn_nbr_grd_max];
/* Name output dimensions/variables */
area_nm_out=rgr->area_nm;
bnd_tm_nm_out=rgr->bnd_tm_nm;
frc_nm_out=rgr->frc_nm;
lat_bnd_nm_out=rgr->lat_bnd_nm;
lat_wgt_nm=rgr->lat_wgt_nm;
lon_bnd_nm_out=rgr->lon_bnd_nm;
msk_nm_out=rgr->msk_nm;
/* Use explicitly specified output names, if any, otherwise use input names (either explicitly specified or discovered by fuzzing) */
if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=col_nm_in;
if(rgr->lat_dmn_nm) lat_dmn_nm_out=rgr->lat_dmn_nm; else lat_dmn_nm_out=lat_nm_in;
if(rgr->lon_dmn_nm) lon_dmn_nm_out=rgr->lon_dmn_nm; else lon_dmn_nm_out=lon_nm_in;
if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=lat_nm_in;
if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=lon_nm_in;
if(flg_grd_out_1D){
bnd_nm_out=rgr->vrt_nm;
lat_bnd_nm_out=rgr->lat_vrt_nm;
lon_bnd_nm_out=rgr->lon_vrt_nm;
} /* !flg_grd_out_1D */
if(flg_grd_out_crv){
bnd_nm_out=rgr->bnd_nm;
} /* !flg_grd_out_crv */
if(flg_grd_out_rct){
bnd_nm_out=rgr->bnd_tm_nm; /* NB: default to bnd_tm_nm for spatial bounds */
} /* !flg_grd_out_rct */
if(flg_grd_out_2D){
lat_bnd_nm_out=rgr->lat_bnd_nm;
lon_bnd_nm_out=rgr->lon_bnd_nm;
} /* !flg_grd_out_2D */
if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){
slat_nm_out=strdup("slat");
slat_wgt_nm_out=strdup("w_stag");
slon_nm_out=strdup("slon");
} /* !nco_grd_lat_fv */
/* Ensure temporal bounds dimension name is distinct from spatial bounds when their sizes differ */
if(bnd_nbr_out != bnd_tm_nbr_out){
if(!strcmp(bnd_nm_out,bnd_tm_nm_out)){
(void)fprintf(stdout,"%s: INFO %s reports spatial and temporal output bounds dimensions are identical (and named \"%s\") by default for rectangular output grids because both can be stored as 2D arrays. That cannot work for this mapping because temporal and spatial bounds dimensions sizes differ (bnd_nbr_out = %d, bnd_tm_nbr_out = %d). Using fall-back spatial bounds name \"%s\" instead. HINT: You may change one or both manually with \"ncks --rgr bnd_nm=name\" or \"ncks --rgr bnd_tm_nm=name\", or, using ncremap, with \"ncremap -R '--rgr bnd_nm=name'\" or \"ncremap -R '--rgr bnd_tm_nm=name'\"\n",nco_prg_nm_get(),fnc_nm,bnd_tm_nm_out,bnd_nbr_out,bnd_tm_nbr_out,bnd_nm_out);
} /* !strcmp() */
} /* !bnd_nbr_out */
/* Persistent metadata */
aed_sct aed_mtd_crd;
char *att_val_crd=NULL;
char *att_nm_crd=NULL;
att_nm_crd=strdup("coordinates");
aed_mtd_crd.att_nm=att_nm_crd;
if(flg_grd_out_1D || flg_grd_out_crv) aed_mtd_crd.mode=aed_overwrite; else aed_mtd_crd.mode=aed_delete;
aed_mtd_crd.type=NC_CHAR;
aed_mtd_crd.sz=strlen(lat_nm_out)+strlen(lon_nm_out)+1L;
att_val_crd=(char *)nco_malloc((aed_mtd_crd.sz+1L)*nco_typ_lng(aed_mtd_crd.type));
(void)sprintf(att_val_crd,"%s %s",lat_nm_out,lon_nm_out);
aed_mtd_crd.val.cp=att_val_crd;
/* Reminder:
Regridder area_out options, e.g., --rgr area_out, set flg_area_out to control adding "area" variable to regridded output
Regridder cll_msr options, --rgr cll_msr, set flg_cll_msr to control adding "cell_measures" attribute to regridded output
ncks & ncra cll_msr options, --cll_msr, set EXTRACT_CLL_MSR to control adding "cell_measures" variables (e.g., area) to extraction list of input file
EXTRACT_CLL_MSR supersedes --rgr area_out in determining whether to add "area" to regridded output */
nco_bool flg_area_out=rgr->flg_area_out; /* [flg] Add area to output */
nco_bool flg_cll_msr=rgr->flg_cll_msr; /* [flg] Add cell_measures attribute */
aed_sct aed_mtd_cll_msr;
char *att_nm_cll_msr=NULL;
char *att_val_cll_msr=NULL;
if(flg_cll_msr){
att_nm_cll_msr=strdup("cell_measures");
aed_mtd_cll_msr.att_nm=att_nm_cll_msr;
aed_mtd_cll_msr.mode=aed_overwrite;
aed_mtd_cll_msr.type=NC_CHAR;
att_val_cll_msr=(char *)nco_malloc((strlen(area_nm_out)+6L+1L)*nco_typ_lng(aed_mtd_cll_msr.type));
(void)sprintf(att_val_cll_msr,"area: %s",area_nm_out);
aed_mtd_cll_msr.sz=strlen(att_val_cll_msr);
aed_mtd_cll_msr.val.cp=att_val_cll_msr;
} /* !flg_cll_msr */
/* Define new horizontal dimensions before all else */
if(flg_grd_out_1D){
rcd+=nco_def_dim(out_id,col_nm_out,col_nbr_out,&dmn_id_col);
} /* !flg_grd_out_1D */
if(flg_grd_out_2D){
rcd+=nco_def_dim(out_id,lat_dmn_nm_out,lat_nbr_out,&dmn_id_lat);
rcd+=nco_def_dim(out_id,lon_dmn_nm_out,lon_nbr_out,&dmn_id_lon);
if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){
rcd+=nco_def_dim(out_id,slat_nm_out,slat_nbr_out,&dmn_id_slat);
rcd+=nco_def_dim(out_id,slon_nm_out,slon_nbr_out,&dmn_id_slon);
} /* !nco_grd_lat_fv */
} /* !flg_grd_out_2D */
/* If dimension has not been defined, define it */
rcd=nco_inq_dimid_flg(out_id,bnd_tm_nm_out,&dmn_id_bnd_tm);
if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,bnd_tm_nm_out,bnd_tm_nbr_out,&dmn_id_bnd_tm);
/* If dimension has not been defined, define it */
rcd=nco_inq_dimid_flg(out_id,bnd_nm_out,&dmn_id_bnd);
if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,bnd_nm_out,bnd_nbr_out,&dmn_id_bnd);
char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */
char *var_nm; /* [sng] Variable name */
int *dmn_id_in=NULL; /* [id] Dimension IDs */
int *dmn_id_out=NULL; /* [id] Dimension IDs */
int var_id_in; /* [id] Variable ID */
int var_id_out; /* [id] Variable ID */
nc_type var_typ_out; /* [enm] Variable type to write to disk */
nc_type var_typ_rgr; /* [enm] Variable type used during regridding */
nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */
int shuffle; /* [flg] Turn-on shuffle filter */
int deflate; /* [flg] Turn-on deflate filter */
deflate=(int)True;
shuffle=NC_SHUFFLE;
dfl_lvl=rgr->dfl_lvl;
fl_out_fmt=rgr->fl_out_fmt;
/* Define new coordinates and grid variables in regridded file */
if(flg_grd_out_1D){
rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&lat_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&lon_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
dmn_ids_out[0]=dmn_id_col;
dmn_ids_out[1]=dmn_id_bnd;
rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
dmn_ids_out[0]=dmn_id_col;
dmn_ids_out[1]=dmn_id_bnd;
rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
if(flg_area_out){
rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&area_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !flg_area_out */
if(flg_frc_out_wrt){
rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&frc_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !flg_frc_out_wrt */
if(flg_msk_out){
rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_col,&msk_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !flg_msk_out */
} /* !flg_grd_out_1D */
if(flg_grd_out_crv){
dmn_ids_out[0]=dmn_id_lat;
dmn_ids_out[1]=dmn_id_lon;
rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
if(flg_area_out){
rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !flg_area_out */
if(flg_frc_out_wrt){
rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&frc_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !flg_frc_out_wrt */
if(flg_msk_out){
rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids_out,&msk_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !flg_msk_out */
dmn_ids_out[0]=dmn_id_lat;
dmn_ids_out[1]=dmn_id_lon;
dmn_ids_out[2]=dmn_id_bnd;
rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_3D,dmn_ids_out,&lat_bnd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_3D,dmn_ids_out,&lon_bnd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !flg_grd_out_crv */
if(flg_grd_out_rct){
rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lat,&lat_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lon,&lon_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){
rcd+=nco_def_var(out_id,slat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slat,&slat_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slat_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
rcd+=nco_def_var(out_id,slat_wgt_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slat,&slat_wgt_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slat_wgt_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
rcd+=nco_def_var(out_id,slon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slon,&slon_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slon_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !nco_grd_lat_fv */
dmn_ids_out[0]=dmn_id_lat;
dmn_ids_out[1]=dmn_id_bnd;
rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
dmn_ids_out[0]=dmn_id_lon;
dmn_ids_out[1]=dmn_id_bnd;
rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
rcd+=nco_def_var(out_id,lat_wgt_nm,crd_typ_out,dmn_nbr_1D,&dmn_id_lat,&lat_wgt_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_wgt_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
dmn_ids_out[0]=dmn_id_lat;
dmn_ids_out[1]=dmn_id_lon;
if(flg_area_out){
rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !flg_area_out */
if(flg_frc_out_wrt){
rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&frc_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !flg_frc_out_wrt */
if(flg_msk_out){
rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids_out,&msk_out_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl);
var_crt_nbr++;
} /* !flg_msk_out */
} /* !flg_grd_out_rct */
/* Add _FillValue to empty destination cells, if requested */
nco_bool flg_add_fll=rgr->flg_add_fll; /* [flg] Add _FillValue to fields with empty destination cells */
nco_bool flg_dst_mpt=False; /* [flg] At least one destination cell is empty */
size_t dst_idx; /* [idx] Index on destination grid */
/* Determine whether any destination cells are, in fact, empty
Logic here could be replaced by examining frac_b variable, if we trust input frac_b...
...and we do trust input frac_b since it is already used for renormalization */
if(flg_add_fll){
if(flg_msk_apl){
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++)
if(msk_out[dst_idx] == 0) break;
if(dst_idx < grd_sz_out) flg_dst_mpt=True;
if(flg_dst_mpt && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports at least one destination cell, Fortran (1-based) row index %lu, is empty. User requested (with --msk_apl) that masked cells receive _FillValue, so regridder will ensure that all regridded fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm,dst_idx+1L);
}else{
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ /* For each destination cell... */
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ /* ...does any weight... */
if(row_dst_adr[lnk_idx] == dst_idx){ /* ...contribute to that cell? */
/* If so, break lnk_idx loop and continue to next iteration of dst_idx loop */
break;
} /* !row_dst_adr */
} /* !lnk_idx */
/* If weight loop reached end without a match, then this destination cell is empty */
if(lnk_idx == lnk_nbr){
flg_dst_mpt=True;
break;
} /* !lnk_idx */
} /* !dst_idx */
if(flg_dst_mpt && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports at least one destination cell, Fortran (1-based) row index %lu, is empty. User requested (with --add_fll) that empty cells receive _FillValue, so regridder will ensure that all regridded fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm,dst_idx+1L);
} /* !flg_msk_apl */
} /* !flg_add_fll */
/* Pre-allocate dimension ID and cnt/srt space */
int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */
int dmn_in_fst; /* [idx] Offset of input- relative to output-dimension due to non-MRV dimension insertion */
int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */
int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */
rcd+=nco_inq_ndims(in_id,&dmn_nbr_max);
dmn_nbr_max++; /* Safety in case regridding adds dimension */
dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int));
dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int));
dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
dmn_cnt=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
/* Identify all record-dimensions in input file */
rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec);
if(dmn_nbr_rec > 0){
dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int));
rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec);
} /* !dmn_nbr_rec */
int flg_pck; /* [flg] Variable is packed on disk */
nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */
double mss_val_dbl;
double mss_val_cmp_dbl; /* Missing value for comparison to double precision values */
/* Define regridded and copied variables in output file */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv_tbl->lst[idx_tbl].flg_mrv=True;
trv=trv_tbl->lst[idx_tbl];
if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
var_nm=trv.nm;
/* Preserve input type in output type */
var_typ_out=trv.var_typ;
/* Demote DP to SP to save space. fxm: missing value type will then be inconsistent if copied without demotion */
//if(trv.var_typ == NC_DOUBLE) var_typ_out=NC_FLOAT; else var_typ_out=trv.var_typ;
dmn_nbr_in=trv.nbr_dmn;
dmn_nbr_out=trv.nbr_dmn;
rcd=nco_inq_varid(in_id,var_nm,&var_id_in);
rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out);
/* If variable has not been defined, define it */
if(rcd != NC_NOERR){
if(trv.flg_rgr){
/* Regrid */
rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in);
dmn_in_fst=0;
rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck);
if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports variable \"%s\" is packed so results unpredictable. HINT: If regridded values seems weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm);
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm);
/* Is horizontal dimension last, i.e., most-rapidly-varying? */
if(flg_grd_in_1D && !strcmp(dmn_nm,col_nm_in)){
if(dmn_idx != dmn_nbr_in-1){
/* Unstructured input grid has col in non-MRV location (expect this with, e.g., MPAS-O/I native grid dimension-ordering */
(void)fprintf(stdout,"%s: WARNING %s reports unstructured grid spatial coordinate %s is (zero-based) dimension %d of input variable to be regridded %s which has %d dimensions. The NCO regridder does not support unstructured spatial dimensions that are not the last (i.e., most rapidly varying) dimension of an input variable, so results are likely garbage.\nHINT: Re-arrange input file dimensions to place horizontal dimension(s) last with, e.g., \'ncpdq -a time,lev,%s in.nc out.nc\' prior to calling the regridder. E3SM users: If this is an MPAS dataset with a new (unknown to ncremap) dimension, please ask Charlie to add the dimension to the ncremap dimension permutation list.\n",nco_prg_nm_get(),fnc_nm,dmn_nm,dmn_idx,var_nm,dmn_nbr_in,dmn_nm);
trv_tbl->lst[idx_tbl].flg_mrv=False;
} /* !dmn_idx */
} /* !flg_grd_in_1D */
if(flg_grd_in_2D && (!strcmp(dmn_nm,lat_nm_in) || !strcmp(dmn_nm,lon_nm_in))){
/* Are horizontal dimensions most-rapidly-varying? */
if(dmn_idx != dmn_nbr_in-1 && dmn_idx != dmn_nbr_in-2){
/* NB: Lat/lon input grid has lat/lon in non-MRV location (expect this with, e.g., AIRS L2 grid dimension-ordering */
(void)fprintf(stdout,"%s: WARNING %s reports lat-lon grid spatial coordinate %s is (zero-based) dimension %d of input variable to be regridded %s which has %d dimensions. The NCO regridder does not support rectangular lat-lon dimension(s) that are not the last two (i.e., most rapidly varying) dimensions of an input variable, so results are likely garbage.\nHINT: Re-arrange input file dimensions to place horizontal dimensions last with, e.g., \'ncpdq -a time,lev,lat,lon in.nc out.nc\' prior to calling the regridder.\n",nco_prg_nm_get(),fnc_nm,dmn_nm,dmn_idx,var_nm,dmn_nbr_in);
trv_tbl->lst[idx_tbl].flg_mrv=False;
} /* !dmn_idx */
} /* !flg_grd_in_2D */
if(flg_grd_out_1D){
if((nco_rgr_typ == nco_rgr_grd_2D_to_1D) && (!strcmp(dmn_nm,lat_nm_in) || !strcmp(dmn_nm,lon_nm_in))){
/* Replace orthogonal horizontal dimensions by unstructured horizontal dimension already defined */
if(!strcmp(dmn_nm,lat_nm_in)){
/* Replace lat with col */
dmn_id_out[dmn_idx]=dmn_id_col;
dmn_cnt[dmn_idx]=col_nbr_out;
} /* endif lat */
if(!strcmp(dmn_nm,lon_nm_in)){
/* Assume non-MRV dimensions are ordered lat/lon. Replace lat with col. Shift MRV dimensions to left after deleting lon. */
dmn_id_out[dmn_idx]=NC_MIN_INT;
dmn_cnt[dmn_idx]=NC_MIN_INT;
dmn_nbr_out--;
/* Reduce output dimension position of all subsequent input dimensions by one */
if(!trv_tbl->lst[idx_tbl].flg_mrv) dmn_in_fst=-1;
} /* endif lon */
}else{
/* Dimension col_nm_in has already been defined as col_nm_out, replicate all other dimensions */
if(!strcmp(dmn_nm,col_nm_in)) rcd=nco_inq_dimid_flg(out_id,col_nm_out,dmn_id_out+dmn_idx);
else rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx+dmn_in_fst);
if(rcd != NC_NOERR){
rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx+dmn_in_fst);
/* Check-for and, if found, retain record dimension property */
for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++)
if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx])
dmn_cnt[dmn_idx+dmn_in_fst]=NC_UNLIMITED;
rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx+dmn_in_fst],dmn_id_out+dmn_idx+dmn_in_fst);
} /* !rcd */
} /* !lat && !lon */
} /* !flg_grd_out_1D */
if(flg_grd_out_2D){
if(nco_rgr_typ == nco_rgr_grd_1D_to_2D && !strcmp(dmn_nm,col_nm_in)){
/* Replace unstructured horizontal dimension by orthogonal horizontal dimensions already defined */
dmn_id_out[dmn_idx]=dmn_id_lat;
dmn_id_out[dmn_idx+1]=dmn_id_lon;
dmn_cnt[dmn_idx]=lat_nbr_out;
dmn_cnt[dmn_idx+1]=lon_nbr_out;
dmn_nbr_out++;
/* Increase output dimension position of all subsequent input dimensions by one */
if(!trv_tbl->lst[idx_tbl].flg_mrv) dmn_in_fst=1;
}else{
/* Dimensions lat/lon_nm_in have already been defined as lat/lon_nm_out, replicate all other dimensions */
if(!strcmp(dmn_nm,lat_nm_in)) rcd=nco_inq_dimid_flg(out_id,lat_dmn_nm_out,dmn_id_out+dmn_idx);
else if(!strcmp(dmn_nm,lon_nm_in)) rcd=nco_inq_dimid_flg(out_id,lon_dmn_nm_out,dmn_id_out+dmn_idx);
else rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx+dmn_in_fst);
if(rcd != NC_NOERR){
rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx+dmn_in_fst);
/* Check-for and, if found, retain record dimension property */
for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++)
if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx])
dmn_cnt[dmn_idx+dmn_in_fst]=NC_UNLIMITED;
rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx+dmn_in_fst],dmn_id_out+dmn_idx+dmn_in_fst);
} /* !rcd */
} /* !col */
} /* !1D_to_2D */
} /* !dmn_idx */
}else{ /* !flg_rgr */
/* Replicate non-regridded variables */
rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in);
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm);
rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx);
if(rcd != NC_NOERR){
rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx);
/* Check-for and, if found, retain record dimension property */
for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++)
if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx])
dmn_cnt[dmn_idx]=NC_UNLIMITED;
rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx],dmn_id_out+dmn_idx);
} /* !rcd */
} /* !dmn_idx */
} /* !flg_rgr */
rcd=nco_def_var(out_id,var_nm,var_typ_out,dmn_nbr_out,dmn_id_out,&var_id_out);
/* Duplicate netCDF4 settings when possible */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){
/* Deflation */
if(dmn_nbr_out > 0){
int dfl_lvl_in; /* [enm] Deflate level [0..9] */
rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in);
/* Copy original deflation settings */
if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in);
/* Overwrite HDF Lempel-Ziv compression level, if requested */
if(dfl_lvl == 0) deflate=(int)False; else deflate=(int)True;
/* Turn-off shuffle when uncompressing otherwise chunking requests may fail */
if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE;
/* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */
if(dfl_lvl >= 0) shuffle=NC_SHUFFLE;
if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl);
} /* !dmn_nbr_out */
} /* !NC_FORMAT_NETCDF4 */
(void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY);
if(trv.flg_rgr){
aed_mtd_crd.var_nm=var_nm;
aed_mtd_crd.id=var_id_out;
(void)nco_aed_prc(out_id,var_id_out,aed_mtd_crd);
if(flg_cll_msr){
aed_mtd_cll_msr.var_nm=var_nm;
aed_mtd_cll_msr.id=var_id_out;
(void)nco_aed_prc(out_id,var_id_out,aed_mtd_cll_msr);
} /* !flg_cll_msr */
/* 20210602: Ensure all regridded variables have _FillValue if user requested _FillValue in empty cells and there are empty cells */
if(flg_add_fll && flg_dst_mpt){
/* Check for _FillValue here iff user requests non-default behavior */
has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,(double *)NULL);
if(!has_mss_val){
val_unn mss_val_dfl; /* [] Default _FillValue */
mss_val_dfl=nco_mss_val_dfl_get(var_typ_out);
rcd=nco_put_att(out_id,var_id_out,"_FillValue",var_typ_out,1L,(void *)(&mss_val_dfl));
} /* !has_mss_val */
} /* !flg_add_fll */
} /* !flg_rgr */
} /* !rcd */
} /* !var */
} /* !idx_tbl */
/* Free pre-allocated array space */
/* col_nm_in will not otherwise be free'd if it was guessed as usual suspect */
if(col_nm_in != rgr->col_nm_in) col_nm_in=(char *)nco_free(col_nm_in);
if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in);
if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out);
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
if(dmn_cnt) dmn_cnt=(long *)nco_free(dmn_cnt);
if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec);
/* Define new metadata in regridded file */
if(flg_area_out){
  /* Annotate regridded area variable with CF-compliant metadata */
  rcd=nco_char_att_put(out_id,area_nm_out,"long_name","Solid angle subtended by gridcell");
  rcd=nco_char_att_put(out_id,area_nm_out,"standard_name","solid_angle");
  rcd=nco_char_att_put(out_id,area_nm_out,"units","steradian");
  if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd);
  /* 8L accommodates ", : sum" plus NUL-terminator */
  att_val=(char *)nco_calloc((strlen(lat_dmn_nm_out)+strlen(lon_dmn_nm_out)+8L),sizeof(char));
  (void)sprintf(att_val,"%s, %s: sum",lat_dmn_nm_out,lon_dmn_nm_out);
  /* Bugfix: CF Conventions attribute is "cell_methods", was misspelled "cell_mathods" */
  rcd=nco_char_att_put(out_id,area_nm_out,"cell_methods",att_val);
  if(att_val) att_val=(char *)nco_free(att_val);
} /* !flg_area_out */
if(flg_frc_out_wrt){
  /* Annotate regridded fraction variable with CF-compliant metadata */
  rcd=nco_char_att_put(out_id,frc_nm_out,"long_name","Fraction of gridcell valid on destination grid");
  /* Bugfix: coordinates attribute belongs on frc_nm_out, not area_nm_out (copy-paste from area block) */
  if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,frc_nm_out,att_nm_crd,att_val_crd);
  /* 8L accommodates ", : sum" plus NUL-terminator */
  att_val=(char *)nco_calloc((strlen(lat_dmn_nm_out)+strlen(lon_dmn_nm_out)+8L),sizeof(char));
  (void)sprintf(att_val,"%s, %s: sum",lat_dmn_nm_out,lon_dmn_nm_out);
  /* Bugfix: CF Conventions attribute is "cell_methods", was misspelled "cell_mathods" */
  rcd=nco_char_att_put(out_id,frc_nm_out,"cell_methods",att_val);
  /* Bugfix: plug memory leak, att_val was calloc'd above and never freed (cf. matching free in area block) */
  if(att_val) att_val=(char *)nco_free(att_val);
} /* !flg_frc_out_wrt */
if(flg_msk_out){
  /* Annotate regridded mask variable with CF-compliant metadata */
  rcd=nco_char_att_put(out_id,msk_nm_out,"long_name","Mask (0 = invalid destination, 1 = valid destination)");
  /* Bugfix: coordinates attribute belongs on msk_nm_out, not area_nm_out (copy-paste from area block) */
  if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,msk_nm_out,att_nm_crd,att_val_crd);
} /* !flg_msk_out */
rcd=nco_char_att_put(out_id,lat_nm_out,"long_name","Latitude of Grid Cell Centers");
rcd=nco_char_att_put(out_id,lat_nm_out,"standard_name","latitude");
rcd=nco_char_att_put(out_id,lat_nm_out,"units","degrees_north");
// 20200205: Attach "axis" attribute to single-dimensional geospatial coordinates not to two-dimensional coordinate variables per CF Conventions section 5.2
if(!flg_grd_out_crv) rcd=nco_char_att_put(out_id,lat_nm_out,"axis","Y");
double vld_min;
vld_min=-90.0;
att_nm=strdup("valid_min");
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=lat_nm_out;
aed_mtd.id=lat_out_id;
aed_mtd.sz=1;
aed_mtd.type=NC_DOUBLE;
aed_mtd.val.dp=&vld_min;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,lat_out_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
double vld_max;
vld_max=90.0;
att_nm=strdup("valid_max");
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=lat_nm_out;
aed_mtd.id=lat_out_id;
aed_mtd.sz=1;
aed_mtd.type=NC_DOUBLE;
aed_mtd.val.dp=&vld_max;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,lat_out_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
rcd=nco_char_att_put(out_id,lat_nm_out,"bounds",lat_bnd_nm_out);
if(flg_grd_out_rct) att_val=strdup("Gridcell latitude interfaces"); else att_val=strdup("Gridcell latitude vertices");
rcd=nco_char_att_put(out_id,lat_bnd_nm_out,"long_name",att_val);
/* Bugfix: plug memory leak, free strdup'd value once attribute has been written to file */
if(att_val) att_val=(char *)nco_free(att_val);
rcd=nco_char_att_put(out_id,lon_nm_out,"long_name","Longitude of Grid Cell Centers");
rcd=nco_char_att_put(out_id,lon_nm_out,"standard_name","longitude");
rcd=nco_char_att_put(out_id,lon_nm_out,"units","degrees_east");
// 20200205: Attach "axis" attribute to single-dimensional geospatial coordinates not to two-dimensional coordinate variables per CF Conventions section 5.2
if(!flg_grd_out_crv) rcd=nco_char_att_put(out_id,lon_nm_out,"axis","X");
/* UGRID Conventions define "topology" and "modulo" attributes
https://github.com/ugrid-conventions/ugrid-conventions
My understanding is these should only be utilized for global grids */
if(nco_rgr_typ == nco_rgr_grd_2D_to_2D){
/* fxm: change this to check whether lon_spn >= 360 or nco_grd_xtn == global */
att_nm=strdup("modulo");
double modulo=360.0;
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=lon_nm_out;
aed_mtd.id=lon_out_id;
aed_mtd.sz=1;
aed_mtd.type=NC_DOUBLE;
aed_mtd.val.dp=&modulo;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,lon_out_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
rcd=nco_char_att_put(out_id,lon_nm_out,"topology","circular");
} /* !nco_rgr_grd_2D_to_2D */
if(lon_ctr_out[0] >= 0.0) vld_min=0.0; else vld_min=-180.0;
att_nm=strdup("valid_min");
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=lon_nm_out;
aed_mtd.id=lon_out_id;
aed_mtd.sz=1;
aed_mtd.type=NC_DOUBLE;
aed_mtd.val.dp=&vld_min;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,lon_out_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
if(lon_ctr_out[0] >= 0.0) vld_max=360.0; else vld_max=180.0;
att_nm=strdup("valid_max");
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=lon_nm_out;
aed_mtd.id=lon_out_id;
aed_mtd.sz=1;
aed_mtd.type=NC_DOUBLE;
aed_mtd.val.dp=&vld_max;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,lon_out_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
rcd=nco_char_att_put(out_id,lon_nm_out,"bounds",lon_bnd_nm_out);
att_nm=strdup("bounds");
att_val=lon_bnd_nm_out;
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=lon_nm_out;
aed_mtd.id=lon_out_id;
aed_mtd.sz=strlen(att_val);
aed_mtd.type=NC_CHAR;
aed_mtd.val.cp=att_val;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,lon_out_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
if(flg_grd_out_rct) att_val=strdup("Gridcell longitude interfaces"); else att_val=strdup("Gridcell longitude vertices");
rcd=nco_char_att_put(out_id,lon_bnd_nm_out,"long_name",att_val);
/* Bugfix: plug memory leak, free strdup'd value once attribute has been written to file
   NB: safe here because previous att_val assignment (lon_bnd_nm_out alias) was overwritten, not owned */
if(att_val) att_val=(char *)nco_free(att_val);
if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){
rcd=nco_char_att_put(out_id,slat_nm_out,"long_name","Latitude for staggered FV grid");
rcd=nco_char_att_put(out_id,slat_nm_out,"units","degrees_north");
rcd=nco_char_att_put(out_id,slat_wgt_nm_out,"long_name","Latitude weights for staggered FV grid");
rcd=nco_char_att_put(out_id,slon_nm_out,"long_name","Longitude for staggered FV grid");
rcd=nco_char_att_put(out_id,slon_nm_out,"units","degrees_east");
} /* !nco_grd_lat_fv */
if(flg_grd_out_rct) rcd=nco_char_att_put(out_id,lat_wgt_nm,"long_name","Latitude quadrature weights (normalized to sum to 2.0 on global grids)");
rcd=nco_char_att_put(out_id,NULL,"map_file",fl_in);
rcd=nco_char_att_put(out_id,NULL,"input_file",rgr->fl_in);
/* Annotate persistent metadata that should appear last in attribute list */
if(flg_grd_out_1D){
if(flg_area_out) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd);
if(flg_frc_out_wrt) rcd=nco_char_att_put(out_id,frc_nm_out,att_nm_crd,att_val_crd);
if(flg_msk_out) rcd=nco_char_att_put(out_id,msk_nm_out,att_nm_crd,att_val_crd);
} /* !flg_grd_out_1D */
/* Persistent metadata */
if(att_nm_crd) att_nm_crd=(char *)nco_free(att_nm_crd);
if(att_val_crd) att_val_crd=(char *)nco_free(att_val_crd);
if(flg_cll_msr){
if(att_nm_cll_msr) att_nm_cll_msr=(char *)nco_free(att_nm_cll_msr);
if(att_val_cll_msr) att_val_cll_msr=(char *)nco_free(att_val_cll_msr);
} /* !flg_cll_msr */
if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){
if(slat_nm_out) slat_nm_out=(char *)nco_free(slat_nm_out);
if(slat_wgt_nm_out) slat_wgt_nm_out=(char *)nco_free(slat_wgt_nm_out);
if(slon_nm_out) slon_nm_out=(char *)nco_free(slon_nm_out);
} /* !nco_grd_lat_fv */
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Begin data mode */
(void)nco_enddef(out_id);
/* Write new coordinates and variables to regridded file */
if(flg_grd_out_1D){
dmn_srt_out[0]=0L;
dmn_cnt_tuo[0]=col_nbr_out;
(void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out);
dmn_srt_out[0]=0L;
dmn_cnt_tuo[0]=col_nbr_out;
(void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out);
dmn_srt_out[0]=dmn_srt_out[1]=0L;
dmn_cnt_tuo[0]=col_nbr_out;
dmn_cnt_tuo[1]=bnd_nbr_out;
(void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_bnd_out,crd_typ_out);
dmn_srt_out[0]=dmn_srt_out[1]=0L;
dmn_cnt_tuo[0]=col_nbr_out;
dmn_cnt_tuo[1]=bnd_nbr_out;
(void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_bnd_out,crd_typ_out);
if(flg_area_out){
dmn_srt_out[0]=0L;
dmn_cnt_tuo[0]=col_nbr_out;
(void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out);
} /* !flg_area_out */
if(flg_msk_out){
dmn_srt_out[0]=0L;
dmn_cnt_tuo[0]=col_nbr_out;
(void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,(nc_type)NC_INT);
} /* !flg_msk_out */
} /* !flg_grd_out_1D */
if(flg_grd_out_crv){
dmn_srt_out[0]=dmn_srt_out[1]=0L;
dmn_cnt_tuo[0]=lat_nbr_out;
dmn_cnt_tuo[1]=lon_nbr_out;
(void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out);
(void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out);
if(flg_area_out){
(void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out);
} /* !flg_area_out */
if(flg_frc_out_wrt){
(void)nco_put_vara(out_id,frc_out_id,dmn_srt_out,dmn_cnt_tuo,frc_out,crd_typ_out);
} /* !flg_frc_out_wrt */
if(flg_msk_out){
(void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,(nc_type)NC_INT);
} /* !flg_msk_out */
dmn_srt_out[0]=dmn_srt_out[1]=dmn_srt_out[2]=0L;
dmn_cnt_tuo[0]=lat_nbr_out;
dmn_cnt_tuo[1]=lon_nbr_out;
dmn_cnt_tuo[2]=bnd_nbr_out;
/* NB: 20160803 Semantically confusing---curvilinear grids must write *_crn_out data into *_bnd_out arrays */
(void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_crn_out,crd_typ_out);
(void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_crn_out,crd_typ_out);
} /* !flg_grd_out_crv */
if(flg_grd_out_rct){
dmn_srt_out[0]=0L;
dmn_cnt_tuo[0]=lat_nbr_out;
(void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out);
dmn_srt_out[0]=0L;
dmn_cnt_tuo[0]=lon_nbr_out;
(void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out);
if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){
dmn_srt_out[0]=0L;
dmn_cnt_tuo[0]=slat_nbr_out;
(void)nco_put_vara(out_id,slat_out_id,dmn_srt_out,dmn_cnt_tuo,slat_ctr_out,crd_typ_out);
(void)nco_put_vara(out_id,slat_wgt_id,dmn_srt_out,dmn_cnt_tuo,slat_wgt_out,crd_typ_out);
dmn_srt_out[0]=0L;
dmn_cnt_tuo[0]=slon_nbr_out;
(void)nco_put_vara(out_id,slon_out_id,dmn_srt_out,dmn_cnt_tuo,slon_ctr_out,crd_typ_out);
if(slat_ctr_out) slat_ctr_out=(double *)nco_free(slat_ctr_out);
if(slat_wgt_out) slat_wgt_out=(double *)nco_free(slat_wgt_out);
if(slon_ctr_out) slon_ctr_out=(double *)nco_free(slon_ctr_out);
} /* !nco_grd_lat_fv */
dmn_srt_out[0]=0L;
dmn_cnt_tuo[0]=lat_nbr_out;
(void)nco_put_vara(out_id,lat_wgt_id,dmn_srt_out,dmn_cnt_tuo,lat_wgt_out,crd_typ_out);
dmn_srt_out[0]=dmn_srt_out[1]=0L;
dmn_cnt_tuo[0]=lat_nbr_out;
dmn_cnt_tuo[1]=bnd_nbr_out;
(void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_bnd_out,crd_typ_out);
dmn_srt_out[0]=dmn_srt_out[1]=0L;
dmn_cnt_tuo[0]=lon_nbr_out;
dmn_cnt_tuo[1]=bnd_nbr_out;
(void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_bnd_out,crd_typ_out);
dmn_srt_out[0]=dmn_srt_out[1]=0L;
dmn_cnt_tuo[0]=lat_nbr_out;
dmn_cnt_tuo[1]=lon_nbr_out;
if(flg_area_out){
(void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out);
} /* !flg_area_out */
if(flg_frc_out_wrt){
(void)nco_put_vara(out_id,frc_out_id,dmn_srt_out,dmn_cnt_tuo,frc_out,crd_typ_out);
} /* !flg_frc_out_wrt */
if(flg_msk_out){
(void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,(nc_type)NC_INT);
} /* !flg_msk_out */
} /* !flg_grd_out_rct */
/* Regrid or copy variable values */
const double wgt_vld_thr=rgr->wgt_vld_thr; /* [frc] Weight threshold for valid destination value */
const nco_bool flg_rnr=rgr->flg_rnr; /* [flg] Renormalize destination values by valid area */
char *sgs_frc_nm=NULL;
char *sgs_msk_nm=NULL;
double *sgs_frc_in=NULL;
double *sgs_frc_out=NULL;
double *var_val_dbl_in=NULL;
double *var_val_dbl_out=NULL;
double *wgt_vld_out=NULL;
double var_val_crr;
int *tally=NULL; /* [nbr] Number of valid (non-missing) values */
int lvl_idx; /* [idx] Level index */
int lvl_nbr; /* [nbr] Number of levels */
int thr_idx; /* [idx] Thread index */
size_t idx_in; /* [idx] Input grid index */
size_t idx_out; /* [idx] Output grid index */
size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */
size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */
size_t val_in_fst; /* [nbr] Number of elements by which current N-D slab input values are offset from origin */
size_t val_out_fst; /* [nbr] Number of elements by which current N-D slab output values are offset from origin */
/* 20190322: Prior to entering OpenMP loop, collect specified SGS information */
const double sgs_nrm=rgr->sgs_nrm; /* [frc] Sub-gridscale normalization */
if(rgr->sgs_frc_nm){
/* Normalization test:
fl_in=20181217.CNTL_CNPCTC1850_OIBGC.ne30_oECv3.edison.clm2.h0.2000-12.nc
/bin/cp -f ${DATA}/hdf/${fl_in} ~/elm_raw.nc
ncremap -P sgs -v FSDS,TBOT,GPP -a aave -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/cmip6_180x360_scrip.20181001.nc ~/elm_raw.nc ~/elm_sgs.nc # Original SGS method
ncks -A -v grid_area ${DATA}/grids/ne30np4_pentagons.091226.nc ~/elm_sgs.nc
ncremap -P gsg -v FSDS,TBOT,GPP -m ${DATA}/maps/map_ne30np4_to_cmip6_180x360_aave.20181001.nc ~/elm_raw.nc ~/elm_gsg.nc # New SGS method */
if(rgr->sgs_msk_nm) sgs_msk_nm=(char *)strdup(rgr->sgs_msk_nm);
sgs_frc_nm=(char *)strdup(rgr->sgs_frc_nm);
var_nm=sgs_frc_nm;
var_typ_rgr=NC_DOUBLE; /* NB: Regrid in double precision */
var_typ_out=NC_DOUBLE; /* NB: sgs_frc_out must be double precision */
var_sz_in=1L; /* Compute from scratch to be sure it matches grd_sz_in */
var_sz_out=grd_sz_out; /* Assume this holds */
char *fl_sgs=NULL; /* [sng] External sub-gridscale file name */
int sgs_id; /* [id] netCDF file ID for external sub-gridscale file */
sgs_id=in_id;
if((rcd=nco_inq_varid_flg(sgs_id,var_nm,&var_id_in)) != NC_NOERR){
/* If sgs_frc_nm is not in input file then search for it in external area file */
#ifdef WIN32
const char sls_chr='\\'; /* [chr] Slash character */
#else /* !WIN32 */
const char sls_chr='/'; /* [chr] Slash character */
#endif /* !WIN32 */
char *sls_ptr; /* [sng] Pointer to last slash character (' ') */
sls_ptr=strrchr(var_nm,sls_chr);
if(!sls_ptr){
(void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unable to find sgs_frc_nm = %s in current input file, and unable to identify filename (ending with slash '/' or backslash '\\', as appropriate) portion of that string to serve as local external file for sgs_frc input, exiting\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm);
nco_exit(EXIT_FAILURE);
} /* !sls_ptr */
sgs_frc_nm=(char *)strdup(sls_ptr+1L); /* Copy variable-name portion of string */
*sls_ptr='\0'; /* NULL-terminate filename */
fl_sgs=(char *)strdup(var_nm);
var_nm=sgs_frc_nm; /* NB: too tricky? */
rcd=nco_open(fl_sgs,NC_NOWRITE,&sgs_id);
if((rcd=nco_inq_varid_flg(sgs_id,var_nm,&var_id_in)) != NC_NOERR){
(void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unable to find sgs_frc_nm = \"%s\" in local external file %s, exiting\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm,fl_sgs);
nco_exit(EXIT_FAILURE);
} /* !rcd */
if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s obtaining sgs_frc = %s from file %s\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm,fl_sgs);
} /* !rcd */
rcd=nco_inq_varndims(sgs_id,var_id_in,&dmn_nbr_in);
dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? dmn_nbr_in : dmn_nbr_out;
dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int));
dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */
dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
rcd=nco_inq_vardimid(sgs_id,var_id_in,dmn_id_in);
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
rcd=nco_inq_dimlen(sgs_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx);
var_sz_in*=dmn_cnt_in[dmn_idx];
dmn_srt[dmn_idx]=0L;
} /* !dmn_idx */
if(var_sz_in != grd_sz_in){
(void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") requires that sgs_frc = %s be same size as spatial grid but var_sz_in = %lu != %lu = grd_sz_in\n",nco_prg_nm_get(),fnc_nm,var_nm,var_sz_in,grd_sz_in);
nco_exit(EXIT_FAILURE);
} /* !var_sz_in */
/* Missing value setup (NB: ELM landfrac has _FillValue and is _FillValue where masked */
has_mss_val=nco_mss_val_get_dbl(sgs_id,var_id_in,&mss_val_dbl);
if(has_mss_val) mss_val_cmp_dbl=mss_val_dbl; else mss_val_cmp_dbl=NC_FILL_DOUBLE;
sgs_frc_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() sgs_frc_in value buffer");
rcd=nco_get_vara(sgs_id,var_id_in,dmn_srt,dmn_cnt_in,sgs_frc_in,var_typ_rgr);
/* If sgs_frc comes from external local file, close it now */
if(fl_sgs){
rcd=nco_close(sgs_id);
fl_sgs=(char *)nco_free(fl_sgs);
} /* !fl_sgs */
/* Initialize output */
sgs_frc_out=(double *)nco_malloc_dbg(grd_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() sgs_frc_out value buffer");
/* Initialize and regrid sgs_frc_out
20190907: sgs_frc_in (landfrac) is _FillValue (1.0e36) for ELM datasets in all masked gridcells, and is always positive definite (never zero) in all unmasked gridcells because it it a true area. ELM sgs_frc_out is always positive definite gridcell area everywhere, with no missing values and no zero values.
20190910: MPAS-Seaice datasets have no mask, and sgs_frc_in (timeMonthly_avg_iceAreaCell) is never (ncatted-appended) _FillValue (-9.99999979021477e+33) and is usually zero because it is time-mean area-fraction of sea ice which only exists in polar regions. MPAS-Seaice sgs_frc_out is zero in all gridcells without sea-ice.
Regardless of input source, following blocks guarantee that sgs_frc_out is defined everywhere, is never a missing value (sgs_frc_out is zero where sgs_frc_in may have been _FillValue), and is always safe to multiply and normalize by sgs_frc_out in main regridding loop */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) sgs_frc_out[dst_idx]=0.0;
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++)
if((var_val_crr=sgs_frc_in[col_src_adr[lnk_idx]]) != mss_val_cmp_dbl)
sgs_frc_out[row_dst_adr[lnk_idx]]+=var_val_crr*wgt_raw[lnk_idx];
/* Sanity check sgs_frc_out */
if(nco_dbg_lvl_get() >= nco_dbg_fl){
/* 20190326: sgs_frc expressed as a fraction must never exceed sgs_nrm
CICE expresses sgs_frc (aice) in percent, i.e., sgs_nrm=100.0
Sum total value of sgs_frc (as opposed to gridcell_area) depends on grid resolution */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){
/* 20190907: Approximate comparison because rounding causes frequent exceedances of sgs_nrm by epsilon ~ 1.0e-15 */
if((float)sgs_frc_out[dst_idx] > sgs_nrm) (void)fprintf(stdout,"%s: INFO %s reports sgs_frc_out[%lu] = %19.15f > %g = sgs_nrm\n",nco_prg_nm_get(),fnc_nm,dst_idx,sgs_frc_out[dst_idx],sgs_nrm);
} /* !dst_idx */
} /* !dbg */
// for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){
// (void)fprintf(stdout,"%s: INFO %s reports sgs_frc_out[%lu] = %19.15f\n",nco_prg_nm_get(),fnc_nm,dst_idx,sgs_frc_out[dst_idx]);
// } /* !dst_idx */
if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in);
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
} /* !sgs_frc_nm */
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"Regridding progress: # means regridded, ~ means copied\n");
/* Using naked stdin/stdout/stderr in parallel region generates
warning Copy appropriate filehandle to variable scoped as shared
in parallel clause */
FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */
/* OpenMP notes:
default(none): GCC9.x does not accept this (https://github.com/nco/nco/issues/114) perhaps because of fp_stdout/stderr? Intel accepts it.
firstprivate(): Pointers that could be inadvertently free()'d if they lost their NULL-initialization
private(): Almost everything else
shared(): uggh...shared clause depends on both compiler and compiler-version
1. Const variables (e.g., flg_rnr,fnc_nm,wgt_vld_thr) are default shared for gcc >= 4.9.2,
2. fnc_nm (only!) must be explicit shared for g++ 4.6.3 (travis)
3. flg_rnr,fnc_nm,wgt_vld_thr must be explicit shared for icc 13.1.3 (rhea)
4. assert() cannot be used in OpenMP blocks
5. Good discussion of "const" variables in shared() clause here http://jakascorner.com/blog/2016/07/omp-default-none-and-const.html
20200221: fxm Revisit default(none) in light of above article */
#ifdef __GNUG__
# define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ )
# if GCC_LIB_VERSION < 490
# define GXX_OLD_OPENMP_SHARED_TREATMENT 1
# endif /* 480 */
# if GCC_LIB_VERSION >= 900
# define GXX_WITH_OPENMP5_GPU_SUPPORT 1
# endif /* 900 */
#endif /* !__GNUC__ */
#if defined( __INTEL_COMPILER)
# pragma omp parallel for default(none) firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_cmp_dbl,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_add_fll,flg_frc_nrm,flg_msk_apl,flg_msk_out,flg_rnr,fnc_nm,frc_out,lnk_nbr,msk_out,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw,wgt_vld_thr)
#else /* !__INTEL_COMPILER */
# ifdef GXX_OLD_OPENMP_SHARED_TREATMENT
# pragma omp parallel for default(none) firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_cmp_dbl,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_add_fll,flg_frc_nrm,flg_msk_apl,flg_msk_out,flg_rnr,fnc_nm,frc_out,lnk_nbr,msk_out,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw)
# else /* !old g++ */
# if defined(GXX_WITH_OPENMP5_GPU_SUPPORT) && 0
# pragma omp target teams distribute parallel for firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_cmp_dbl,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_add_fll,flg_frc_nrm,flg_msk_apl,flg_msk_out,flg_rnr,frc_out,lnk_nbr,msk_out,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw)
# else
# pragma omp parallel for firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_cmp_dbl,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_add_fll,flg_frc_nrm,flg_msk_apl,flg_msk_out,frc_out,lnk_nbr,msk_out,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw)
# endif /* !GCC >= 9.0 */
# endif /* !GCC < 4.9 */
#endif /* !__INTEL_COMPILER */
for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){
trv=trv_tbl->lst[idx_tbl];
thr_idx=omp_get_thread_num();
in_id=trv_tbl->in_id_arr[thr_idx];
#ifdef _OPENMP
if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : "");
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm);
#endif /* !_OPENMP */
if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm);
if(trv.flg_rgr){
/* Regrid variable */
var_nm=trv.nm;
var_typ_rgr=NC_DOUBLE; /* NB: Perform regridding in double precision */
var_typ_out=trv.var_typ; /* NB: Output type in file is same as input type */
var_sz_in=1L;
var_sz_out=1L;
rcd=nco_inq_varid(in_id,var_nm,&var_id_in);
rcd=nco_inq_varid(out_id,var_nm,&var_id_out);
rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in);
rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out);
dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? dmn_nbr_in : dmn_nbr_out;
dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int));
dmn_id_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int));
dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */
dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long));
rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in);
rcd=nco_inq_vardimid(out_id,var_id_out,dmn_id_out);
for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){
rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx);
var_sz_in*=dmn_cnt_in[dmn_idx];
dmn_srt[dmn_idx]=0L;
} /* !dmn_idx */
for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){
rcd=nco_inq_dimlen(out_id,dmn_id_out[dmn_idx],dmn_cnt_out+dmn_idx);
if(dmn_cnt_out[dmn_idx] == 0L){
/* No records have been written, so overwrite zero output record size with input record size */
char dmn_rec_nm[NC_MAX_NAME]; /* [sng] Record dimension name */
int dmn_rec_id_in;
rcd=nco_inq_dimname(out_id,dmn_id_out[dmn_idx],dmn_rec_nm);
rcd=nco_inq_dimid(in_id,dmn_rec_nm,&dmn_rec_id_in);
rcd=nco_inq_dimlen(in_id,dmn_rec_id_in,dmn_cnt_out+dmn_idx);
} /* !dmn_cnt_out */
var_sz_out*=dmn_cnt_out[dmn_idx];
dmn_srt[dmn_idx]=0L;
} /* !dmn_idx */
/* Compute number and size of non-lat/lon or non-col dimensions (e.g., level, time, species, wavelength)
Denote their convolution by level or 'lvl' for shorthand
There are lvl_nbr elements for each lat/lon or col position
20151011: Until today assume lat/lon and col are most-rapidly varying dimensions
20151011: Until today lvl_nbr missed last non-spatial dimension for 1D output */
lvl_nbr=1;
/* Simple prescription of lvl_nbr works when horizontal dimension(s) is/are MRV */
for(dmn_idx=0;dmn_idx<dmn_nbr_out-dmn_nbr_hrz_crd;dmn_idx++) lvl_nbr*=dmn_cnt_out[dmn_idx];
/* Determining whether an individual field _uses_ missing values is important because
memory requirements of next four malloc's (i.e., exclusive of wgt_raw) can sum to
~7*sizeof(uncompressed var) for NC_FLOAT and ~3.5*sizeof(uncompressed var) for NC_DOUBLE.
Traditionally has_mss_val answers "does this variable _have_ and explicit missing value?"
As of 20210909, we expand the meaning of has_mss_val, though only in nco_rgr_wgt()
Now has_mss_val means does the variable use the explicitly defined missing value, or,
failing that, does it use the implicitly defined missing value?
Only variables that _use_ a missing value need tally and wgt_vld_out arrays
mss_val_dbl is what nco_mss_val_get_dbl() returns---its meaning has not changed
However, it is no longer intended to be used
Instead we create mss_val_cmp_dbl, a more general value for comparison and assignment */
var_val_dbl_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() input value buffer");
var_val_dbl_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output value buffer");
/* Obtain input variable */
rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_dbl_in,var_typ_rgr);
/* 20210909: Begin new missing value treatment */
has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl);
/* NB: mss_val_cmp_dbl must be defined since it is now always used by regridder (even when has_mss_val is False)
For instance flg_msk_apl block, below, uses mss_val_cmp_dbl for masked fields
And test for _usage_ of missing values, below, necessarily compares to mss_val_cmp_dbl
If missing value is not explicitly declared, use default missing value */
if(has_mss_val) mss_val_cmp_dbl=mss_val_dbl; else mss_val_cmp_dbl=NC_FILL_DOUBLE;
/* Override float/double value with appropriate default missing value for integers */
if(!has_mss_val){
switch(var_typ_out){
case NC_BYTE: mss_val_cmp_dbl=NC_FILL_BYTE; break;
case NC_CHAR: mss_val_cmp_dbl=NC_FILL_CHAR; break;
case NC_SHORT: mss_val_cmp_dbl=NC_FILL_SHORT; break;
case NC_INT: mss_val_cmp_dbl=NC_FILL_INT; break;
case NC_FLOAT: mss_val_cmp_dbl=NC_FILL_FLOAT; break;
case NC_DOUBLE: mss_val_cmp_dbl=NC_FILL_DOUBLE; break;
case NC_UBYTE: mss_val_cmp_dbl=NC_FILL_UBYTE; break;
case NC_USHORT: mss_val_cmp_dbl=NC_FILL_USHORT; break;
case NC_UINT: mss_val_cmp_dbl=NC_FILL_UINT; break;
/* 20210909: Implicit type conversion generates warnings:
'long long' to 'double' changes value from -9223372036854775806 to -9223372036854775808
'unsigned long long' to 'double' changes value from 18446744073709551614 to 18446744073709551616
Warnings can be fixed with -Wimplicit-const-int-float-conversion */
case NC_INT64: mss_val_cmp_dbl=NC_FILL_INT64; break;
case NC_UINT64: mss_val_cmp_dbl=NC_FILL_UINT64; break;
case NC_STRING:
default: nco_dfl_case_nc_type_err(); break;
} /* !var_typ_in */
} /* !has_mss_val */
/* Re-initialize Boolean to True and override with False if variable _uses_ missing values */
has_mss_val=True;
for(idx_in=0;idx_in<var_sz_in;idx_in++){
if(var_val_dbl_in[idx_in] == mss_val_cmp_dbl) break;
} /* !idx_in */
/* If neither implicit nor explicit missing value is present, treat all values as valid */
if(idx_in == var_sz_in) has_mss_val=False;
/* 20210909: End new missing value treatment */
/* Memory allocation that depends on _FillValue and input variable contents */
if(has_mss_val) tally=(int *)nco_malloc_dbg(var_sz_out*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() tally buffer");
if(has_mss_val && flg_rnr) wgt_vld_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output renormalization weight buffer");
/* Initialize output */
(void)memset(var_val_dbl_out,0,var_sz_out*nco_typ_lng(var_typ_rgr));
if(has_mss_val) (void)memset(tally,0,var_sz_out*nco_typ_lng(NC_INT));
if(wgt_vld_out) (void)memset(wgt_vld_out,0,var_sz_out*nco_typ_lng(var_typ_rgr));
/* 20150914: Intensive variables require normalization, extensive do not
Intensive variables (temperature, wind speed, mixing ratio) do not depend on gridcell boundaries
Extensive variables (population, counts, numbers of things) depend on gridcell boundaries
Extensive variables are the exception in models, yet are commonly used for sampling information, e.g.,
number of photons, number of overpasses
Pass extensive variable list to NCO with, e.g., --xtn=TSurfStd_ct,...
20190420: Remove languishing, unfinished intensive variable code */
clock_t tm_srt; /* [us] Microseconds at start */
clock_t tm_end; /* [us] Microseconds at end */
float tm_drn; /* [s] Seconds elapsed */
if(nco_dbg_lvl_get() >= nco_dbg_var) tm_srt=clock();
/* This first block is for "normal" variables without sub-gridscale fractions */
if(!sgs_frc_out){
/* Apply weights */
if(!has_mss_val){
if(lvl_nbr == 1){
/* Weight single-level fields without missing values */
#ifdef ENABLE_GPU
# pragma omp target data map(to:col_src_adr[0:lnk_nbr],row_dst_adr[0:lnk_nbr],var_val_dbl_in[0:var_sz_in],wgt_raw[0:lnk_nbr]) map(tofrom:var_val_dbl_out[0:var_sz_out])
# pragma omp target teams distribute parallel for simd schedule(static,1)
#else /* !ENABLE_GPU */
# if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
# endif /* !__GNUC__ */
#endif /* !ENABLE_GPU */
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++)
var_val_dbl_out[row_dst_adr[lnk_idx]]+=var_val_dbl_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx];
}else{
val_in_fst=0L;
val_out_fst=0L;
/* Weight multi-level fields without missing values */
#ifdef ENABLE_GPU
# pragma omp target data map(to:col_src_adr[0:lnk_nbr],row_dst_adr[0:lnk_nbr],var_val_dbl_in[0:var_sz_in],wgt_raw[0:lnk_nbr]) map(tofrom:var_val_dbl_out[0:var_sz_out])
# pragma omp parallel for reduction(+:val_in_fst,val_out_fst)
#endif /* !ENABLE_GPU */
for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){
//if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(fp_stdout,"%s lvl_idx = %d val_in_fst = %li, val_out_fst = %li\n",trv.nm,lvl_idx,val_in_fst,val_out_fst);
#ifdef ENABLE_GPU
# pragma omp target teams distribute parallel for simd schedule(static,1)
#else /* !ENABLE_GPU */
# if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
# endif /* !__GNUC__ */
#endif /* !ENABLE_GPU */
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++)
var_val_dbl_out[row_dst_adr[lnk_idx]+val_out_fst]+=var_val_dbl_in[col_src_adr[lnk_idx]+val_in_fst]*wgt_raw[lnk_idx];
val_in_fst+=grd_sz_in;
val_out_fst+=grd_sz_out;
} /* !lvl_idx */
} /* lvl_nbr > 1 */
}else{ /* has_mss_val */
if(lvl_nbr == 1){
/* Weight single-level fields with missing values */
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){
idx_in=col_src_adr[lnk_idx];
idx_out=row_dst_adr[lnk_idx];
if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){
var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx];
if(wgt_vld_out) wgt_vld_out[idx_out]+=wgt_raw[lnk_idx];
tally[idx_out]++;
} /* !mss_val_cmp_dbl */
} /* !lnk_idx */
}else{ /* lvl_nbr > 1 */
val_in_fst=0L;
val_out_fst=0L;
/* Weight multi-level fields with missing values */
for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){
idx_in=col_src_adr[lnk_idx]+val_in_fst;
idx_out=row_dst_adr[lnk_idx]+val_out_fst;
if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){
var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx];
if(wgt_vld_out) wgt_vld_out[idx_out]+=wgt_raw[lnk_idx];
tally[idx_out]++;
} /* !mss_val_cmp_dbl */
} /* !lnk_idx */
val_in_fst+=grd_sz_in;
val_out_fst+=grd_sz_out;
} /* !lvl_idx */
} /* lvl_nbr > 1 */
} /* !has_mss_val */
if(!has_mss_val){
/* frc_dst = frc_out = dst_frac = frac_b contains non-unity elements and normalization type is "destarea" or "dstarea" or "none"
When this occurs for conservative remapping, follow "destarea" normalization procedure
See SCRIP manual p. 11 and http://www.earthsystemmodeling.org/esmf_releases/public/last, specifically
http://www.earthsystemmodeling.org/esmf_releases/public/last/ESMF_refdoc/node3.html#SECTION03029000000000000000
"frac_a: When a conservative regridding method is used, this contains the fraction of each source cell that participated in the regridding. When a non-conservative regridding method is used, this array is set to 0.0.
frac_b: When a conservative regridding method is used, this contains the fraction of each destination cell that participated in the regridding. When a non-conservative regridding method is used, this array is set to 1.0 where the point participated in the regridding (i.e. was within the unmasked source grid), and 0.0 otherwise.
If the first-order conservative interpolation method is specified ("-m conserve") then the destination field may need to be adjusted by the destination fraction (frac_b). This should be done if the normalization type is ``dstarea'' (sic, really "destarea") and if the destination grid extends outside the unmasked source grid. If it isn't known if the destination extends outside the source, then it doesn't hurt to apply the destination fraction. (If it doesn't extend outside, then the fraction will be 1.0 everywhere anyway.) The following code shows how to adjust an already interpolated destination field (dst_field) by the destination fraction. The variables n_b, and frac_b are from the weight file:
! Adjust destination field by fraction
do i=1, n_b
if (frac_b(i) .ne. 0.0) then
dst_field(i)=dst_field(i)/frac_b(i)
endif
enddo"
NB: Non-conservative interpolation methods (e.g., bilinear) should NOT apply this normalization (theoretically there is no danger in doing so because frc_out == 1 always for all gridcells that participate in bilinear remapping and frc_out == 0 otherwise)
NCO's renormalization procedure below is similar to the ESMF-recommended procedure above. However, users can control NCO renormalization with, e.g., --rnr_thr=0.1, or override it completely with --rnr_thr=none. Moreover, frac_b == frc_dst is determined solely by solely by gridcell binary mask overlaps during weight generation. It is time-invariant and 2D. Missing values (e.g., AOD) can vary in time and can be 3D (or N-D) and so can wgt_vld_out. Hence NCO renormalization is more flexible. flg_frc_nrm (i.e., ESMF-recommended) normalization makes fields pretty for graphics, yet is non-conservative because e.g., MPAS Ocean gridcells projected onto global uniform grids would have their SSTs normalized for prettiness on coastal gridpoints, which is inherently non-conservative.
20190912: Make "ESMF renormalization" of fields without missing values (i.e., "destarea") opt-in rather than default
"destarea" and frac_b = frc_dst together set flg_frc_nrm
Formerly flg_frc_nrm triggered ESMF renormalization by default
Now flg_frc_nrm and user-explicitly-set --rnr_thr to [0.0,1.0] must both be true to trigger it
This keep conservative maps conservative by default
NB: This "ESMF renormalization" normalizes by frac_b == frc_dst (not by wgt_vld_out) regardless of rnr_thr
20151018: Avoid double-normalizing by only executing fractional normalization
(flg_frc_nrm) block when !has_mss_val, and valid area normalization when has_mss_val */
if(flg_frc_nrm){ /* Only renormalize when frac_b < 1.0 (because frac_b == 1.0 does nothing) */
if(flg_rnr){ /* 20190912: Only renormalize when user explicitly requests it (because renormalization is non-conservative). Prior to today, renormalization was by default, henceforth it is opt-in. */
if(lvl_nbr == 1){
/* Fractionally renormalize single-level fields without missing values */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++)
if(frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=frc_out[dst_idx];
}else{
/* Fractionally renormalize multi-level fields without missing values */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){
if(frc_out[dst_idx] != 0.0){
for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){
var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]/=frc_out[dst_idx];
} /* !lvl_idx */
} /* !frc_out */
} /* !dst_idx */
} /* lvl_nbr > 1 */
} /* !flg_rnr */
} /* !flg_frc_nrm */
} /* !has_mss_val */
if(has_mss_val){
/* NCL and ESMF treatment of weights and missing values described at
https://www.ncl.ucar.edu/Applications/ESMF.shtml#WeightsAndMasking
http://earthsystemmodeling.org/esmf_releases/non_public/ESMF_6_1_1/ESMF_refdoc/node5.html#SECTION05012600000000000000
NCO implements one of two procedures: "conservative" or "renormalized"
The "conservative" algorithm uses all valid data from the input grid on the output grid
Destination cells receive the weighted valid values of the source cells
This is conservative because the global integrals of the source and destination fields are equal
The "renormalized" algorithm divides the destination value by the sum of the valid weights
This returns "reasonable" values, i.e., the mean of the valid input values
However, renormalization is equivalent to extrapolating valid data to missing regions
Hence the input and output integrals are unequal and the regridding is not conservative */
/* In fields with missing values, destination cells with no accumulated weight are missing value */
for(dst_idx=0;dst_idx<var_sz_out;dst_idx++)
if(!tally[dst_idx]) var_val_dbl_out[dst_idx]=mss_val_cmp_dbl;
if(flg_rnr){
// if(nco_dbg_lvl_get() >= nco_dbg_quiet) (void)fprintf(fp_stdout,"%s: DEBUG renormalization for %s uses flg_rnr block\n",nco_prg_nm_get(),var_nm);
if(wgt_vld_thr == 0.0){
/* Renormalize cells with no threshold by valid accumulated weight */
for(dst_idx=0;dst_idx<var_sz_out;dst_idx++)
if(tally[dst_idx]) var_val_dbl_out[dst_idx]/=wgt_vld_out[dst_idx];
}else{
/* Renormalize cells with threshold by valid accumulated weight if weight exceeds threshold */
for(dst_idx=0;dst_idx<var_sz_out;dst_idx++)
if(wgt_vld_out[dst_idx] >= wgt_vld_thr){var_val_dbl_out[dst_idx]/=wgt_vld_out[dst_idx];}else{var_val_dbl_out[dst_idx]=mss_val_cmp_dbl;}
} /* !wgt_vld_thr */
} /* !flg_rnr */
} /* !has_mss_val */
} /* !sgs_frc_out */
/* Variables with sub-gridscale fractions require "double-weighting" and normalization */
if(sgs_frc_out){
if(!strcmp(var_nm,sgs_frc_nm)){
/* Copy shared variable sgs_frc_out that was regridded before OpenMP loop
20190911: Reasons to copy sgs_frc_out into sgs_frc_nm data include speed, consistency, and well-definedness of sgs_frc_out. One reason to regrid sgs_frc_nm here is consistency with original, raw dataset: ELM landfrac is masked so regridding it here (rather than using sgs_frc_out) would produce a regridded dataset more identical to raw ELM output. The same can be said for CICE (I think). MPAS cellMask and timeMonthly_avg_iceAreaCell are not masked, and so should produce the same values as sgs_frc_out if regridded here. */
memcpy(var_val_dbl_out,sgs_frc_out,grd_sz_out*nco_typ_lng(var_typ_rgr));
}else if(sgs_msk_nm && !strcmp(var_nm,sgs_msk_nm)){
/* Compute binary mask directly from shared sgs_frc_out (guaranteed to be all valid values) */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++)
if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]=1.0;
}else{ /* !sgs_msk_nm */
/* "Double-weight" all other sub-gridscale input values by sgs_frc_in and overlap weight, normalize by sgs_frc_out */
if(!has_mss_val){
if(lvl_nbr == 1){
/* SGS-regrid single-level fields without missing values */
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++)
var_val_dbl_out[row_dst_adr[lnk_idx]]+=var_val_dbl_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]];
/* NB: MPAS-Seaice dataset sgs_frc_out is usually zero in non-polar regions */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++)
if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=sgs_frc_out[dst_idx];
}else{ /* lvl_nbr > 1 */
/* SGS-regrid multi-level fields without missing values */
val_in_fst=0L;
val_out_fst=0L;
for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){
idx_in=col_src_adr[lnk_idx];
idx_out=row_dst_adr[lnk_idx];
var_val_dbl_out[idx_out+val_out_fst]+=var_val_dbl_in[idx_in+val_in_fst]*wgt_raw[lnk_idx]*sgs_frc_in[idx_in];
} /* !lnk_idx */
/* Normalize current level values */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++)
if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx+val_out_fst]/=sgs_frc_out[dst_idx];
val_in_fst+=grd_sz_in;
val_out_fst+=grd_sz_out;
} /* !lvl_idx */
} /* lvl_nbr > 1 */
}else{ /* !has_mss_val */
if(lvl_nbr == 1){
/* SGS-regrid single-level fields with missing values */
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){
idx_in=col_src_adr[lnk_idx];
idx_out=row_dst_adr[lnk_idx];
if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){
var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]*sgs_frc_in[idx_in];
tally[idx_out]++;
} /* !mss_val_cmp_dbl */
} /* !lnk_idx */
/* NB: Normalization clause is complex to support sgs_frc_out from both ELM and MPAS-Seaice */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++)
if(!tally[dst_idx]){var_val_dbl_out[dst_idx]=mss_val_cmp_dbl;}else{if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=sgs_frc_out[dst_idx];}
}else{ /* lvl_nbr > 1 */
/* SGS-regrid multi-level fields with missing values */
val_in_fst=0L;
val_out_fst=0L;
for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){
idx_in=col_src_adr[lnk_idx]+val_in_fst;
idx_out=row_dst_adr[lnk_idx]+val_out_fst;
if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){
var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]];
tally[idx_out]++;
} /* !mss_val_cmp_dbl */
} /* !lnk_idx */
/* Normalize current level values */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){
idx_out=dst_idx+val_out_fst;
if(!tally[idx_out]){var_val_dbl_out[idx_out]=mss_val_cmp_dbl;}else{if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[idx_out]/=sgs_frc_out[dst_idx];}
} /* dst_idx */
val_in_fst+=grd_sz_in;
val_out_fst+=grd_sz_out;
} /* !lvl_idx */
} /* lvl_nbr > 1 */
} /* !has_mss_val */
} /* !sgs_msk_nm */
} /* !sgs_frc_out */
if(nco_typ_ntg(var_typ_out)){
/* 20210407: Round, with rint(), integer fields before sending to netCDF for output
Otherwise implicit type conversion will truncate (rather than round) output values
This is critical for masks where rounding errors produce near integer values (e.g., 0.999...)
that could then be truncated to zero by implicit conversion instead of rounded up to 1. */
if(has_mss_val){
for(dst_idx=0;dst_idx<var_sz_out;dst_idx++)
if(var_val_dbl_out[dst_idx] != mss_val_cmp_dbl)
var_val_dbl_out[dst_idx]=rint(var_val_dbl_out[dst_idx]);
}else{
for(dst_idx=0;dst_idx<var_sz_out;dst_idx++)
var_val_dbl_out[dst_idx]=rint(var_val_dbl_out[dst_idx]);
} /* !has_mss_val */
} /* !nco_typ_ntg() */
if(flg_add_fll && !has_mss_val){
/* 20210604: Initialize fields without _FillValue in input file to default missing value in unmapped destination cells
Otherwise empty destination cells will be zero (not _FillValue) in output file
Fields with input _FillValue are already _FillValue in output where tally is zero */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){
if(frc_out[dst_idx] == 0.0){
for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){
var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]=NC_FILL_DOUBLE;
} /* !lvl_idx */
} /* !frc_out */
} /* !dst_idx */
} /* !flg_add_fll */
if(flg_msk_apl){
/* 20210607: Overwrite output values with _FillValue where destination cell is masked
Same procedure regardless of whether input variables already have _FillValue
NB: This is separate, and presumably independent, from above flg_add_fll loop
Fields with flg_msk_apl will (harmlessly?) go through both loops */
double mss_val_msk; /* [frc] Missing value to apply where mask is false */
//if(has_mss_val) mss_val_msk=mss_val_dbl; else mss_val_msk=NC_FILL_DOUBLE;
mss_val_msk=mss_val_cmp_dbl; /* [frc] Missing value to apply where mask is false */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){
if(msk_out[dst_idx] == 0){
for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){
var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]=mss_val_msk;
} /* !lvl_idx */
} /* !frc_out */
} /* !dst_idx */
} /* !flg_add_fll */
if(nco_dbg_lvl_get() >= nco_dbg_var){
tm_end=clock();
tm_drn=(float)(tm_end-tm_srt)/CLOCKS_PER_SEC;
(void)fprintf(fp_stdout,"%s: INFO Compute time for %s (thread %d/%d): %g s\n",nco_prg_nm_get(),trv.nm,thr_idx,omp_get_num_threads(),tm_drn);
} /* !dbg */
#pragma omp critical
{ /* begin OpenMP critical */
// rcd=nco_put_var(out_id,var_id_out,var_val_dbl_out,var_typ_rgr);
rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_dbl_out,var_typ_rgr);
} /* end OpenMP critical */
if(dmn_id_in) dmn_id_out=(int *)nco_free(dmn_id_in);
if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out);
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out);
if(tally) tally=(int *)nco_free(tally);
if(var_val_dbl_out) var_val_dbl_out=(double *)nco_free(var_val_dbl_out);
if(var_val_dbl_in) var_val_dbl_in=(double *)nco_free(var_val_dbl_in);
if(wgt_vld_out) wgt_vld_out=(double *)nco_free(wgt_vld_out);
}else{ /* !trv.flg_rgr */
/* Use standard NCO copy routine for variables that are not regridded */
#pragma omp critical
{ /* begin OpenMP critical */
(void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl);
} /* end OpenMP critical */
} /* !flg_rgr */
} /* !xtr */
} /* end (OpenMP parallel for) loop over idx_tbl */
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n");
if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables regridded = %d (%d extensive), copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_xtn_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr);
/* Free memory allocated for grid reading/writing */
if(area_out) area_out=(double *)nco_free(area_out);
if(col_src_adr) col_src_adr=(int *)nco_free(col_src_adr);
if(dmn_sz_in_int) dmn_sz_in_int=(int *)nco_free(dmn_sz_in_int);
if(dmn_sz_out_int) dmn_sz_out_int=(int *)nco_free(dmn_sz_out_int);
if(frc_out) frc_out=(double *)nco_free(frc_out);
if(lat_bnd_out) lat_bnd_out=(double *)nco_free(lat_bnd_out);
if(lat_crn_out) lat_crn_out=(double *)nco_free(lat_crn_out);
if(lat_ctr_out) lat_ctr_out=(double *)nco_free(lat_ctr_out);
if(lat_ntf_out) lat_ntf_out=(double *)nco_free(lat_ntf_out);
if(lat_wgt_out) lat_wgt_out=(double *)nco_free(lat_wgt_out);
if(lon_bnd_out) lon_bnd_out=(double *)nco_free(lon_bnd_out);
if(lon_crn_out) lon_crn_out=(double *)nco_free(lon_crn_out);
if(lon_ctr_out) lon_ctr_out=(double *)nco_free(lon_ctr_out);
if(lon_ntf_out) lon_ntf_out=(double *)nco_free(lon_ntf_out);
if(msk_out) msk_out=(int *)nco_free(msk_out);
if(row_dst_adr) row_dst_adr=(int *)nco_free(row_dst_adr);
if(sgs_frc_nm) sgs_frc_nm=(char *)nco_free(sgs_frc_nm);
if(sgs_frc_in) sgs_frc_in=(double *)nco_free(sgs_frc_in);
if(sgs_frc_out) sgs_frc_out=(double *)nco_free(sgs_frc_out);
if(sgs_msk_nm) sgs_msk_nm=(char *)nco_free(sgs_msk_nm);
if(wgt_raw) wgt_raw=(double *)nco_free(wgt_raw);
return rcd;
} /* end nco_rgr_wgt() */
void
nco_bsl_zro /* Return Bessel function zeros */
(const int bsl_zro_nbr, /* I [nbr] Number of zeros of j0 to return */
 double * const bsl_zro) /* O [frc] Bessel zero */
{
  /* Purpose: Fill bsl_zro[1..bsl_zro_nbr] with zeros of the Bessel function j0
     The first fifty zeros come from an exact tabulation; any additional zeros
     are extrapolated (consecutive zeros of j0 are asymptotically pi apart) and
     are therefore approximate
     NB: bsl_zro[0] is written but (in C) never meaningfully used; initializing
     it merely suppresses uninitialized-memory warnings
     NB: Caller must supply bsl_zro with at least bsl_zro_nbr+1 elements since
     indexing is one-based to mirror the Fortran original
     Source: CCM code /fs/cgd/csm/models/atm/ccm3.5.8/src/ccmlsm_share/bsslzr.F
     Original version: CCM1
     Standardized: J. Rosinski, June 1992
     Reviewed: J. Hack, D. Williamson, August 1992
     Reviewed: J. Hack, D. Williamson, April 1996
     Modified 19970123 by Jim Rosinski to use double precision arithmetic
     ~2000: Converted to Fortran9X by C. Zender, changed all real*16 statements to double precision (real*8)
     20150530: Converted to C99 by C. Zender */
  const char fnc_nm[]="nco_bsl_zro()"; /* [sng] Function name */
  const double pi=M_PI; /* [frc] Asymptotic spacing between consecutive zeros of j0 */
  /* Tabulated zeros of j0; element 0 is a sentinel so that index i holds the i'th zero */
  const double bsl_zro_tbl[]={
    -1.e36, 2.4048255577, 5.5200781103,
    8.6537279129, 11.7915344391, 14.9309177086, 18.0710639679,
    21.2116366299, 24.3524715308, 27.4934791320, 30.6346064684,
    33.7758202136, 36.9170983537, 40.0584257646, 43.1997917132,
    46.3411883717, 49.4826098974, 52.6240518411, 55.7655107550,
    58.9069839261, 62.0484691902, 65.1899648002, 68.3314693299,
    71.4729816036, 74.6145006437, 77.7560256304, 80.8975558711,
    84.0390907769, 87.1806298436, 90.3221726372, 93.4637187819,
    96.6052679510, 99.7468198587, 102.8883742542, 106.0299309165,
    109.1714896498, 112.3130502805, 115.4546126537, 118.5961766309,
    121.7377420880, 124.8793089132, 128.0208770059, 131.1624462752,
    134.3040166383, 137.4455880203, 140.5871603528, 143.7287335737,
    146.8703076258, 150.0118824570, 153.1534580192, 156.2950342685};
  const int bsl_zro_tbl_nbr_max=50; /* [nbr] Number of exactly tabulated zeros */
  int idx; /* [idx] Counting index */
  /* Main Code */
  if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stdout,"%s: DEBUG Entering %s\n",nco_prg_nm_get(),fnc_nm);
  /* Copy exactly known zeros (sentinel at index 0 included to keep memory initialized) */
  for(idx=0;idx<=bsl_zro_nbr && idx<=bsl_zro_tbl_nbr_max;idx++)
    bsl_zro[idx]=bsl_zro_tbl[idx];
  /* Extrapolate any zeros beyond the table by successive increments of pi */
  for(idx=bsl_zro_tbl_nbr_max+1;idx<=bsl_zro_nbr;idx++)
    bsl_zro[idx]=bsl_zro[idx-1]+pi;
  if(nco_dbg_lvl_get() == nco_dbg_old){
    (void)fprintf(stdout,"%s: DEBUG %s reports bsl_zro_nbr = %d\n",nco_prg_nm_get(),fnc_nm,bsl_zro_nbr);
    (void)fprintf(stdout,"idx\tbsl_zro\n");
    for(idx=1;idx<=bsl_zro_nbr;idx++)
      (void)fprintf(stdout,"%d\t%g\n",idx,bsl_zro[idx]);
  } /* !dbg */
  return;
} /* !nco_bsl_zro() */
void
nco_lat_wgt_gss /* [fnc] Compute and return sine of Gaussian latitudes and their weights */
(const int lat_nbr, /* I [nbr] Latitude number */
const nco_bool flg_s2n, /* I [enm] Latitude grid-direction is South-to-North */
double * const lat_sin, /* O [frc] Sine of latitudes */
double * const wgt_Gss) /* O [frc] Gaussian weights */
{
/* Purpose: Compute and return sine of Gaussian latitudes and their weights
Returned arrays are ordered south-to-north (S->N), not (N->S)
Source: CCM /fs/cgd/csm/models/atm/ccm3.5.8/src/ccmlsm_share/gauaw.F
Calculate sine of latitudes lat_sin(lat_nbr) and weights wgt_Gss(lat_nbr) for Gaussian quadrature
Algorithm described in Davis and Rabinowitz, Journal of Research of the NBS, V 56, Jan 1956
Zeros of Bessel function j0, obtained from nco_bsl_zro(), are first guess for abscissae
Original version: CCM1
Standardized: L. Bath, Jun 1992
L. Buja, Feb 1996
Reviewed: D. Williamson, J. Hack, Aug 1992
D. Williamson, J. Hack, Feb 1996
19970123 Modified by Jim Rosinski to use real*16 arithmetic in order to
achieve (nearly) identical weights and latitudes on all machines.
~2000: Converted to Fortran9X by C. Zender, changed all real*16 statements to double precision (real*8)
20150530: Converted to C99 by C. Zender
20150725: Verified against tabulation at http://pomax.github.io/bezierinfo/legendre-gauss.html#n64 */
const char fnc_nm[]="nco_lat_wgt_gss()"; /* [sng] Function name */
const double eps_rlt=1.0e-16; // Convergence criterion (NB: Threshold was 1.0d-27 in real*16, 1.0e-15 fine for real*8, 1.0e-16 pushes double precision to the brink)
const double pi=M_PI; // [frc] 3
const int itr_nbr_max=20; // [nbr] Maximum number of iterations
double c_cff; // Constant combination coefficient
double lat_idx_dbl; // Latitude index, double precision
double lat_nnr_idx_dbl; // Inner latitude index, double precision
double lat_nbr_dbl; // [nbr] Number of latitudes, double precision
double pk=double_CEWI; // Polynomial
double pkm1; // Polynomial
double pkm2; // Polynomial
double pkmrk; // Polynomial
double sp; // Current iteration latitude increment
double xz; // Abscissa estimate
double cos_arg; // Intermediate parameter introduced while attempting to eliminate valgrind "uninitialised value" warnings
int itr_cnt; // Iteration counter
int lat_idx; // [idx] Counting index (latitude)
int lat_sym_idx; // [idx] Counting index (symmetric latitude)
int lat_nnr_idx; // [idx] Counting index (inner latitude loop)
int lat_nbr_rcp2; // lat_nbr/2 (number of latitudes in hemisphere)
double *lat_sin_p1; // Sine of Gaussian latitudes double precision
double *wgt_Gss_p1; // Gaussian weights double precision
/* Main Code */
if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stdout,"%s: DEBUG Entering %s\n",nco_prg_nm_get(),fnc_nm);
/* Arrays with Fortran indexing (indicated by "plus one" = "_p1") keep numerical algorithm in C identical to Fortran */
lat_sin_p1=(double *)nco_malloc((lat_nbr+1)*sizeof(double)); // Sine of Gaussian latitudes double precision
wgt_Gss_p1=(double *)nco_malloc((lat_nbr+1)*sizeof(double)); // Gaussian weights double precision
/* Use Newton iteration to find abscissae */
/* c_cff is the constant term in the asymptotic first-guess formula for Legendre-polynomial roots */
c_cff=0.25*(1.0-4.0/(pi*pi));
lat_nbr_dbl=lat_nbr;
lat_nbr_rcp2=lat_nbr/2; // NB: Integer arithmetic
/* nco_bsl_zro() presumably seeds lat_sin_p1[1..lat_nbr_rcp2] with Bessel-function zeros used below as first guesses -- TODO confirm against its definition */
(void)nco_bsl_zro(lat_nbr_rcp2,lat_sin_p1);
/* Only Northern-Hemisphere roots are iterated; Southern Hemisphere filled later by symmetry */
for(lat_idx=1;lat_idx<=lat_nbr_rcp2;lat_idx++){ // NB: Loop starts at 1
// 20150713: Introduce intermediate parameter cos_arg in attempt to eliminate valgrind "uninitialised value" warnings emitted by cos() (actually __cos_sse())
// Warnings occur with gcc-compiled code, not with clang-compiled code
cos_arg=lat_sin_p1[lat_idx]/sqrt((lat_nbr_dbl+0.5)*(lat_nbr_dbl+0.5)+c_cff);
xz=cos(cos_arg);
/* First approximation to xz */
itr_cnt=0;
/* Newton-iteration re-entry point (goto preserves control flow of the original Fortran) */
label_73:
pkm2=1.0;
pkm1=xz;
if(++itr_cnt > itr_nbr_max){
(void)fprintf(stdout,"%s: ERROR %s reports convergence only %g after %d iterations for lat_idx = %d\n",nco_prg_nm_get(),fnc_nm,fabs(sp),itr_nbr_max,lat_idx);
nco_exit(EXIT_FAILURE);
} /* endif */
/* Compute Legendre polynomial P_lat_nbr(xz) by upward three-term recurrence */
for(lat_nnr_idx=2;lat_nnr_idx<=lat_nbr;lat_nnr_idx++){
lat_nnr_idx_dbl=lat_nnr_idx;
pk=((2.0*lat_nnr_idx_dbl-1.0)*xz*pkm1-(lat_nnr_idx_dbl-1.0)*pkm2)/lat_nnr_idx_dbl;
pkm2=pkm1;
pkm1=pk;
} /* end inner loop over lat_nnr */
pkm1=pkm2;
/* pkmrk = P'_lat_nbr(xz); sp = Newton increment P/P' */
pkmrk=(lat_nbr_dbl*(pkm1-xz*pk))/(1.0-xz*xz);
sp=pk/pkmrk;
xz=xz-sp;
/* NB: Easy to introduce bug here by not replacing Fortran abs() with C fabs() */
if(fabs(sp) > eps_rlt) goto label_73;
/* Converged: store abscissa (sine of Gaussian latitude) and standard Gauss-Legendre weight 2*(1-x^2)/(N*P_{N-1}(x))^2 */
lat_sin_p1[lat_idx]=xz;
wgt_Gss_p1[lat_idx]=(2.0*(1.0-xz*xz))/((lat_nbr_dbl*pkm1)*(lat_nbr_dbl*pkm1));
} /* end outer loop over lat */
if(lat_nbr != lat_nbr_rcp2*2){
/* When lat_nbr is odd, compute weight at Equator */
/* Equatorial abscissa is exactly zero; weight follows closed-form product 2/N^2 * prod(k^2/(k-1)^2) over even k */
lat_sin_p1[lat_nbr_rcp2+1]=0.0;
pk=2.0/(lat_nbr_dbl*lat_nbr_dbl);
for(lat_idx=2;lat_idx<=lat_nbr;lat_idx+=2){
lat_idx_dbl=lat_idx;
pk=pk*lat_idx_dbl*lat_idx_dbl/((lat_idx_dbl-1.0)*(lat_idx_dbl-1.0));
} /* end loop over lat */
wgt_Gss_p1[lat_nbr_rcp2+1]=pk;
} /* endif lat_nbr is odd */
/* Complete sets of abscissas and weights, using symmetry properties */
/* Legendre roots are antisymmetric about x=0, weights symmetric: mirror Northern Hemisphere into Southern */
for(lat_idx=1;lat_idx<=lat_nbr_rcp2;lat_idx++){
lat_sym_idx=lat_nbr-lat_idx+1;
lat_sin_p1[lat_sym_idx]=-lat_sin_p1[lat_idx];
wgt_Gss_p1[lat_sym_idx]=wgt_Gss_p1[lat_idx];
} /* end loop over lat */
/* Shift by one to remove Fortran offset in p1 arrays */
//memcpy(lat_sin,lat_sin_p1,lat_nbr*sizeof(double));
//memcpy(wgt_Gss,wgt_Gss_p1,lat_nbr*sizeof(double));
/* Reverse and shift arrays because original CCM code algorithm computes latitudes from north-to-south
Shift by one to remove Fortran offset in p1 arrays */
if(flg_s2n){
/* South-to-north output requested: copy p1 arrays in reverse order */
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){
lat_sin[lat_idx]=lat_sin_p1[lat_nbr-lat_idx];
wgt_Gss[lat_idx]=wgt_Gss_p1[lat_nbr-lat_idx];
} /* end loop over lat */
}else{
/* North-to-south output: plain copy with unit offset removed */
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){
lat_sin[lat_idx]=lat_sin_p1[lat_idx+1];
wgt_Gss[lat_idx]=wgt_Gss_p1[lat_idx+1];
} /* end loop over lat */
} /* !flg_s2n */
/* Optional debug dump: header line then one tab-separated row per latitude
Columns: index, sine of latitude, angle [rdn], angle [dgr], Gaussian weight */
if(nco_dbg_lvl_get() == nco_dbg_old){
(void)fprintf(stdout,"%s: DEBUG %s reports lat_nbr = %d\n",nco_prg_nm_get(),fnc_nm,lat_nbr);
(void)fprintf(stdout,"idx\tasin\tngl_rad\tngl_dgr\tgw\n");
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++)
/* Bugfix: format previously read "%g%g" which fused the ngl_dgr and gw columns; header advertises five tab-separated columns */
(void)fprintf(stdout,"%d\t%g\t%g\t%g\t%g\n",lat_idx,lat_sin[lat_idx],asin(lat_sin[lat_idx]),180.0*asin(lat_sin[lat_idx])/pi,wgt_Gss[lat_idx]);
} /* endif dbg */
/* Free Fortran-indexed work arrays; nco_free() returns NULL so pointers are safely re-initialized */
if(wgt_Gss_p1) wgt_Gss_p1=(double *)nco_free(wgt_Gss_p1);
if(lat_sin_p1) lat_sin_p1=(double *)nco_free(lat_sin_p1);
return;
} /* end nco_lat_wgt_gss() */
void
nco_sph_plg_area /* [fnc] Compute area of spherical polygon */
(rgr_sct * const rgr, /* I [sct] Regridding structure */
const double * const lat_bnd, /* [dgr] Latitude boundaries of rectangular grid */
const double * const lon_bnd, /* [dgr] Longitude boundaries of rectangular grid */
const long col_nbr, /* [nbr] Number of columns in grid */
const int bnd_nbr, /* [nbr] Number of bounds in gridcell */
double * const area) /* [sr] Gridcell area */
{
/* Purpose: Compute area of spherical polygon */
/* Computing triangular area accurately is hard in corner cases
Spherical triangle suffer from at least as many issues as planar, which are described by
"Miscalculating Area and Angles of a Needle-like Triangle" by W. Kahan, UC Berkeley
In particular, the Law of Cosines and Heron's formula can be ill-conditioned
For spherical triangles L'Huilier's Theorem is superior to Girard's Formula:
http://mathworld.wolfram.com/LHuiliersTheorem.html
Girard's formula depends on pi-minus-angle and angle is usually quite small in our applications so precision would be lost
L'Huilier's theorem depends only on angles (a,b,c) and semi-perimeter (s) and is well-conditioned for small angles
semi-perimeter = half-perimeter of triangle = 0.5*(a+b+c)
Spherical Excess (SE) difference between the sum of the angles of a spherical triangle area and a planar triangle area with same interior angles (that sum to pi)
SE is also the solid angle subtended by the spherical triangle and that's, well, astonishing and pretty cool
Wikipedia shows a better SE formula for triangles that are ill-conditioned for L'Huilier's formula because a = b ~ 0.5c
https://en.wikipedia.org/wiki/Spherical_trigonometry#Area_and_spherical_excess
See also interesting discussion of L'Huilier by Charles Karney who suggests his own alternative:
http://osgeo-org.1560.x6.nabble.com/Area-of-a-spherical-polygon-td3841625.html
The discussion mentions Mil94
Robert D. Miller, Computing the area of a spherical polygon, Graphic Gems IV, chapter II.4, pages 132-137.
http://books.google.com/books?id=CCqzMm_-WucC&pg=PA132&lpg=PA132&dq=miller+area+spherical+polygon+gems&source=bl&ots=mrnvZ6NJcm&sig=CMg8eaD8dzP5snMaPeCQzgoFWUk&hl=sv&ei=4G-YTKv5GsWZOI-mmZQP&sa=X&oi=book_result&ct=result&resnum=1&ved=0CBQQ6AEwAA#v=onepage&q&f=false
Mil94 contains similar ideas to my method for spherical polygons (decomposing into adjacent multiple triangles from single vertex)
However, his method places single vertex at pole, then adds signed areas to obtain full polygon area
His method may suffer from degraded precision because of roundoff error and long side-lengths
So-called "proper" spherical triangle are those for which all angles are less than pi, so a+b+c<3*pi
Cartesian coordinates of (lat,lon)=(theta,phi) are (x,y,z)=(cos(theta)*cos(phi),cos(theta)*sin(phi),sin(theta))
Dot-product rule for vectors gives interior angle/arc length between two points:
cos(a)=u dot v=cos(theta1)*cos(phi1)*cos(theta2)*cos(phi2)+cos(theta1)*sin(phi1)*cos(theta2)*sin(phi2)+sin(theta1)*sin(theta2)
Spherical law of cosines relates interior angles/arc-lengths (a,b,c) to surface angles (A,B,C) in spherical triangle:
https://en.wikipedia.org/wiki/Spherical_law_of_cosines
cos(a)=cos(b)*cos(c)+sin(b)*sin(c)*cos(A)
cos(b)=cos(c)*cos(a)+sin(c)*sin(a)*cos(B)
cos(c)=cos(a)*cos(b)+sin(a)*sin(b)*cos(C)
cos(A)=[cos(a)-cos(b)*cos(c)]/[sin(b)*sin(c)]
cos(B)=[cos(b)-cos(c)*cos(a)]/[sin(c)*sin(a)]
cos(C)=[cos(c)-cos(a)*cos(b)]/[sin(a)*sin(b)]
Bounds information on unstructured grids will use bounds_nbr=maximum(vertice_nbr)
Unused vertices are stored as either repeated points (ACME does this) or, conceiveably, as missing values
Given (lat,lon) for N-points algorithm to find area of spherical polygon is:
1. Any decomposition, Girard areas: Loses precision due to mismatch between pi and small spherical excesses
A. Find interior angles/arc-lengths (a,b,c,d...) using spherical law of cosines along each edge
B. Apply generalized Girard formula SE_n = Sum(A_n) - (N-2) - pi
2. CSZ decomposition (N-2 triangles) with L'Huilier areas,
Convert polygon into triangles by cycling spoke through all sides from common apex
This method requires computation of N-2 (not N) triangles, though fewer sides due to optimization
It works on all convex polygons (interior angles less than 180) but not, in general, concave polygons
Whether it works or not on concave polygons depends upon their exact shape and the choice of apex point
A. First three non-identical points form first triangle with sides A,B,C (first+second point define A, etc.)
i. First vertice anchors all triangles
ii. Third vertice of preceding triangle becomes second vertice of next triangle
iii. Next non-identical point becomes last vertice of next triangle
iv. Side C of previous triangle is side A of next triangle
B. For each triangle, compute area with L'Huilier formula unless A = B ~ 0.5*C then use SAS formula
3. centroidal decomposition, N triangle version by Taylor, L'Huilier areas:
Compute polygon centroid and treat this as hub from which spokes are drawn to all vertices
This method requires computation of N triangles, though fewer sides due to optimization
Moreover, it works on all convex polygons and on slightly concave polygons
Centroid/hub has clear view of interior of most simple concave polygons
4. Any decomposition but with exact RLL grids by Zender and Agress 20160918
A. Decompose polygon into triangles via any method (e.g., method 2 or 3 above)
B. Determine whether triangle is spherical or contains RLL (constant latitude)
C. Spherical triangles use L'Huilier, RLL triangles use series expansion */
const char fnc_nm[]="nco_sph_plg_area()";
const double dgr2rdn=M_PI/180.0;
int bnd_nbr_ttl; /* [nbr] Number of bounds in gridcell accounting for possibility of centroid information */
long idx; /* [idx] Counting index for unrolled grids */
short int bnd_idx;
/* Shift to this method once we pass rgr into nco_sph_plg_area() */
nco_bool flg_mth_csz=False; /* [flg] Use CSZ's advancing polygon bisector method */
nco_bool flg_mth_ctr=False; /* [flg] Use centroid method to compute polygon area */
nco_edg_typ_enm edg_typ; /* [enm] Arc-type for triangle edges */
nco_ply_tri_mth_typ_enm ply_tri_mth; /* [enm] Polygon decomposition method */
if(rgr->edg_typ == nco_edg_nil) rgr->edg_typ=nco_edg_gtc;
edg_typ=rgr->edg_typ; /* [enm] Arc-type for triangle edges */
ply_tri_mth=rgr->ply_tri_mth; /* [enm] Polygon decomposition method */
if(ply_tri_mth == nco_ply_tri_mth_csz) flg_mth_csz=True;
if(ply_tri_mth == nco_ply_tri_mth_ctr) flg_mth_ctr=True;
assert(flg_mth_ctr != flg_mth_csz);
bnd_nbr_ttl=bnd_nbr;
// Allocate space for one extra boundary to store centroid information if necessary
if(flg_mth_ctr) bnd_nbr_ttl=bnd_nbr+1;
double *lat_bnd_rdn=NULL_CEWI; /* [rdn] Latitude boundaries of rectangular destination grid */
double *lon_bnd_rdn=NULL_CEWI; /* [rdn] Longitude boundaries of rectangular destination grid */
double *lat_bnd_sin=NULL_CEWI; /* [frc] Sine of latitude boundaries of rectangular destination grid */
double *lon_bnd_sin=NULL_CEWI; /* [frc] Sine of longitude boundaries of rectangular destination grid */
double *lat_bnd_cos=NULL_CEWI; /* [frc] Cosine of latitude boundaries of rectangular destination grid */
double *lon_bnd_cos=NULL_CEWI; /* [frc] Cosine of longitude boundaries of rectangular destination grid */
/* Allocate one extra space for some arrays to store polygon centroid values for each column for ply_tri_mth=ctr */
lon_bnd_rdn=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double));
lat_bnd_rdn=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double));
lon_bnd_cos=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double));
lat_bnd_cos=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double));
lon_bnd_sin=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double));
lat_bnd_sin=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double));
memcpy(lat_bnd_rdn,lat_bnd,col_nbr*bnd_nbr*sizeof(double));
memcpy(lon_bnd_rdn,lon_bnd,col_nbr*bnd_nbr*sizeof(double));
for(idx=0;idx<col_nbr*bnd_nbr;idx++){
lon_bnd_rdn[idx]*=dgr2rdn;
lat_bnd_rdn[idx]*=dgr2rdn;
lon_bnd_cos[idx]=cos(lon_bnd_rdn[idx]);
lat_bnd_cos[idx]=cos(lat_bnd_rdn[idx]);
lon_bnd_sin[idx]=sin(lon_bnd_rdn[idx]);
lat_bnd_sin[idx]=sin(lat_bnd_rdn[idx]);
} /* !idx */
double area_smc_crc; /* [sr] Small-circle correction to spherical triangle area */
double area_smc; /* [sr] Gridcell area allowing for latitude-triangles */
double area_ttl; /* [sr] Total area of input polygon list assuming spherical triangles */
double area_smc_ttl; /* [sr] Total area of input polygon list allowing for latitude-triangles */
double area_smc_crc_ttl; /* [sr] Latitude-triangle correction (should be small) to total area of input polygon list */
double area_smc_crc_abs_ttl; /* [sr] Latitude-triangle absolute correction (no compensation of positive/negative contributions, should be no smaller than above) to total area of input polygon list */
double lat_ctr; /* [dgr] Latitude of polygon centroid */
double lon_ctr; /* [dgr] Longitude of polygon centroid */
double lat_ctr_rdn; /* [rdn] Latitude of polygon centroid */
double lon_ctr_rdn; /* [rdn] Longitude of polygon centroid */
double lat_ctr_cos; /* [frc] Cosine latitude of polygon centroid */
double lat_dlt; /* [rdn] Latitudinal difference */
double lon_dlt; /* [rdn] Longitudinal difference */
double ngl_a; /* [rdn] Interior angle/great circle arc a */
double ngl_b; /* [rdn] Interior angle/great circle arc b */
double ngl_c; /* [rdn] Interior angle/great circle arc c */
double ngl_ltr_a; /* [rdn] Interior angle/small circle arc a, canonical latitude-triangle geometry */
double ngl_ltr_b; /* [rdn] Interior angle/great circle arc b, canonical latitude-triangle geometry */
double ngl_ltr_c; /* [rdn] Interior angle/great circle arc c, canonical latitude-triangle geometry */
double prm_smi; /* [rdn] Semi-perimeter of triangle */
double sin_hlf_tht; /* [frc] Sine of half angle/great circle arc theta connecting two points */
double xcs_sph; /* [sr] Spherical excess */
int tri_nbr; /* [nbr] Number of triangles in polygon */
long bnd_vld_nbr=NC_MIN_INT; /* [idx] Number of valid (non-duplicative) vertices in each triangle */
long *a_idx; /* [idx] Point A 1-D indices for each triangle in polygon */
long *b_idx; /* [idx] Point B 1-D indices for each triangle in polygon */
long *c_idx; /* [idx] Point C 1-D indices for each triangle in polygon */
long *vrt_vld=NULL; /* [idx] Absolute 1-D indices of valid vertices */
long idx_a; /* [idx] Point A 1-D index */
long idx_b; /* [idx] Point B 1-D index */
long idx_c; /* [idx] Point C 1-D index */
nco_bool flg_sas_ndl=False; /* [flg] L'Huilier's formula will fail due to needle where one side exceeds semi-perimeter */
nco_bool flg_sas_isc=False; /* [flg] L'Huilier's formula is ill-conditioned due to flat, near-isoceles triangle */
nco_bool flg_sas_a=False; /* [flg] Use SAS triangle formula with central angle a */
nco_bool flg_sas_b=False; /* [flg] Use SAS triangle formula with central angle b */
nco_bool flg_sas_c=False; /* [flg] Use SAS triangle formula with central angle c */
nco_bool flg_ply_has_smc; /* [flg] Any triangle in polygon has small-circle edge */
nco_bool flg_tri_crr_smc; /* [flg] Current triangle has small_circle edge */
/* Initialize global accumulators */
area_ttl=0.0;
area_smc_ttl=0.0;
area_smc_crc_ttl=0.0;
area_smc_crc_abs_ttl=0.0;
for(long col_idx=0;col_idx<col_nbr;col_idx++){
/* Initialize local properties and accumulators for this cell/polygon */
flg_ply_has_smc=False;
ngl_c=double_CEWI; /* Otherwise compiler unsure ngl_c is initialized first use */
area[col_idx]=0.0;
area_smc=0.0;
tri_nbr=0;
if(col_idx == 0){
a_idx=(long *)nco_calloc(bnd_nbr,sizeof(long));
b_idx=(long *)nco_calloc(bnd_nbr,sizeof(long));
c_idx=(long *)nco_calloc(bnd_nbr,sizeof(long));
vrt_vld=(long *)nco_calloc(bnd_nbr,sizeof(long));
} /* !col_idx */
/* Safety re-initialization to ease debugging, not strictly necessary */
for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++){
vrt_vld[bnd_idx]=NC_MIN_INT;
a_idx[bnd_idx]=NC_MIN_INT;
b_idx[bnd_idx]=NC_MIN_INT;
c_idx[bnd_idx]=NC_MIN_INT;
} /* !bnd_idx */
if(flg_mth_ctr){
/* Centroid decomposition: identify valid (non-duplicative) vertices, compute polygon centroid,
then split polygon into triangles fanned from the centroid (or simpler splits for 3-4 vertices) */
double lon_dff; /* [dgr] Longitude difference */
long bnd_srt_idx; /* [idx] Absolute starting index of vertices in polygon */
long bnd_idx; /* [idx] Offset of current valid vertex index from starting index */
long bnd_vld_idx; /* [idx] Absolute index of last valid vertex */
/* First vertice is always valid */
bnd_srt_idx=bnd_nbr*col_idx;
bnd_vld_idx=bnd_srt_idx;
vrt_vld[0]=bnd_vld_idx;
lat_ctr=lat_bnd[bnd_srt_idx];
lon_ctr=lon_bnd[bnd_srt_idx];
bnd_vld_nbr=1;
/* First guess for next valid index */
bnd_idx=1;
/* bnd_idx labels offset from first vertex of next valid (i.e., non-duplicative) vertex */
while(bnd_idx<bnd_nbr){
/* Skip repeated points that must occur when polygon has fewer than allowed vertices */
/* NOTE(review): this dedup test compares lon against the LAST valid vertex (bnd_vld_idx) but lat against the FIRST vertex (bnd_srt_idx) -- the mixed anchors look inconsistent (cf. the CSZ branch which uses idx_a for both); confirm intended semantics */
while(lon_bnd[bnd_vld_idx] == lon_bnd[bnd_srt_idx+bnd_idx] && lat_bnd[bnd_srt_idx] == lat_bnd[bnd_srt_idx+bnd_idx]){
/* Next valid vertice must not duplicate first vertex */
bnd_idx++;
/* Have we already found all valid vertices? */
if(bnd_idx == bnd_nbr) break;
} /* !while */
/* Jump to normalization when all valid vertices found */
if(bnd_idx == bnd_nbr) break;
/* Current vertex is valid (non-duplicative) */
bnd_vld_idx=bnd_srt_idx+bnd_idx;
vrt_vld[bnd_vld_nbr]=bnd_vld_idx;
bnd_vld_nbr++;
if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG %s reports centroidal decomposition col_idx=%lu, bnd_nbr=%d, bnd_idx=%ld, bnd_vld_idx=%ld, bnd_vld_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,col_idx,bnd_nbr,bnd_idx,bnd_vld_idx,bnd_vld_nbr);
assert(bnd_vld_nbr <= bnd_nbr);
lat_ctr+=lat_bnd[bnd_vld_idx];
lon_ctr+=lon_bnd[bnd_vld_idx];
/* Branch-cut adjustment keeps accumulated longitudes on one side of the dateline */
/* NOTE(review): lon_bnd[0] is the first vertex of the ENTIRE grid, not of the current polygon -- presumably lon_bnd[bnd_srt_idx] was intended; as written the adjustment is wrong for col_idx > 0 when polygons straddle the branch cut; verify */
lon_dff=lon_bnd[bnd_vld_idx]-lon_bnd[0];
if(lon_dff >= 180.0){
lon_ctr-=360.0;
}else if(lon_dff <= -180.0){
lon_ctr+=360.0;
} /* !lon_dff */
/* Search for next valid vertice in next iteration */
bnd_idx++;
} /* !bnd_idx */
/* Compute centroid */
lat_ctr/=bnd_vld_nbr;
lon_ctr/=bnd_vld_nbr;
/* Centroid can become point A of bnd_nbr polygons or optimize algorithm:
1. Skip sub-dividing polygon into centroid-based triangles for bnd_vld_nbr == 3
2. Split quadrilaterals into two (non-centroid) triangles for bnd_vld_nbr == 4
3. Use full centroid-based triangle algorithm for bnd_vld_nbr >= 5 */
lat_ctr_rdn=lat_ctr*dgr2rdn;
lon_ctr_rdn=lon_ctr*dgr2rdn;
lat_ctr_cos=cos(lat_ctr_rdn);
/* Place centroid values in extended arrays for easy access */
lat_bnd_rdn[(col_idx+1)*bnd_nbr_ttl-1L]=lat_ctr_rdn;
lon_bnd_rdn[(col_idx+1)*bnd_nbr_ttl-1L]=lon_ctr_rdn;
lat_bnd_cos[(col_idx+1)*bnd_nbr_ttl-1L]=lat_ctr_cos;
/* Polygon centroid and valid vertices are now known */
assert(bnd_vld_nbr > 2);
if(bnd_vld_nbr == 3){
/* Three vertices only means polygon is already decomposed into a triangle */
tri_nbr=1;
a_idx[0]=vrt_vld[0];
b_idx[0]=vrt_vld[1];
c_idx[0]=vrt_vld[2];
}else if(bnd_vld_nbr == 4){
/* Bisect quadrilateral into two (non-centroid) triangles rather than use centroid and have four triangles */
tri_nbr=2;
a_idx[0]=vrt_vld[0];
b_idx[0]=vrt_vld[1];
c_idx[0]=vrt_vld[2];
a_idx[1]=vrt_vld[0]; /* NB: Order is important. This way side C of triangle[0] = side A of triangle[1] */
b_idx[1]=vrt_vld[2];
c_idx[1]=vrt_vld[3];
}else if(bnd_vld_nbr >= 5){
/* Centroid method has as many triangles as valid vertices */
tri_nbr=bnd_vld_nbr;
for(int tri_idx=0;tri_idx<tri_nbr;tri_idx++){
a_idx[tri_idx]=(col_idx+1)*bnd_nbr_ttl-1L; /* A is always centroid, store values at end of arrays */
b_idx[tri_idx]=vrt_vld[tri_idx];
c_idx[tri_idx]=vrt_vld[(tri_idx+1)%tri_nbr];
} /* !tri_idx */
} /* !bnd_vld_nbr */
} /* !flg_mth_ctr */
if(flg_mth_csz){
/* CSZ (hub-and-spoke) decomposition: anchor all triangles at first vertex A,
sweep B and C through subsequent non-duplicative vertices, recycling each C as the next B */
/* A is always first vertice of all triangles */
idx_a=bnd_nbr*col_idx;
/* Start search for B at next vertice */
bnd_idx=1;
/* bnd_idx labels offset from point A of potential location of triangle points B and C
We know that bnd_idx(A) == 0, bnd_idx(B) < bnd_nbr-1, bnd_idx(C) < bnd_nbr */
while(bnd_idx<bnd_nbr-1){
/* Only first triangle must search for B, subsequent triangles recycle previous C as current B */
if(tri_nbr == 0){
/* Skip repeated points that must occur when polygon has fewer than allowed vertices */
/* 20200115: Prior to today we never skipped polar points (same latitudes but different longitudes)
That worked fine in practice for spherical triangles partly because triangles from CSZ decomposition
(aka hub-and-spoke decomposition) are additive, even with multiple points on the same great circle,
and partly due to luck (a starting vertex surrounded by points on the same geodesic would break it).
Moreover, repeated polar points pose no issues for L'Huilier's (or Girard's) method which depends
only on the interior angles and side lengths, not the longitudes of polar points.
Small circles change that last part, and we must now eliminate repeated polar points. */
if(edg_typ == nco_edg_smc){
/* Skip repeated numerically identical points */
while(lon_bnd[idx_a] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_a] == lat_bnd[idx_a+bnd_idx]){
/* Next vertice may not duplicate A */
bnd_idx++;
/* If there is no room for C then all triangles found */
if(bnd_idx == bnd_nbr-1) break;
} /* !while */
/* Skip geometrically identical (i.e., repeated polar) points */
/* NOTE(review): fabs() test also matches OPPOSITE poles (lat A = +90, candidate = -90), which are distinct points -- confirm such polygons cannot occur, or test signed latitudes */
while((fabs(lat_bnd[idx_a]) == 90.0) && (fabs(lat_bnd[idx_a+bnd_idx]) == 90.0)){
bnd_idx++;
if(bnd_idx == bnd_nbr-1) break;
} /* !while */
}else if(edg_typ != nco_edg_smc){
/* Spherical polygons can use simpler, pre-20200116 algorithm to eliminate repeated points */
while(lon_bnd[idx_a] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_a] == lat_bnd[idx_a+bnd_idx]){
/* Next vertice may not duplicate A */
bnd_idx++;
/* If there is no room for C then all triangles found */
if(bnd_idx == bnd_nbr-1) break;
} /* !while */
}else{
/* NOTE(review): unreachable -- the two branches above are exhaustive (== and !=); harmless defensive dead code */
abort();
} /* !edg_typ */
/* Jump to next column when all triangles found */
if(bnd_idx == bnd_nbr-1) break;
} /* !tri_nbr */
idx_b=idx_a+bnd_idx;
/* Search for C at next vertice */
bnd_idx++;
/* fxm */
while(lon_bnd[idx_b] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_b] == lat_bnd[idx_a+bnd_idx]){
/* Next vertice may not duplicate B */
bnd_idx++;
/* If there is no room for C then all triangles found */
if(bnd_idx == bnd_nbr) break;
} /* !while */
/* Jump to next column when all triangles found */
if(bnd_idx == bnd_nbr) break;
idx_c=idx_a+bnd_idx;
/* Valid triangle, vertices are known and labeled */
a_idx[tri_nbr]=idx_a;
b_idx[tri_nbr]=idx_b;
c_idx[tri_nbr]=idx_c;
tri_nbr++;
/* Begin search for next B at current C */
bnd_idx=idx_c-idx_a;
} /* !bnd_idx */
} /* !flg_mth_csz */
/* Triangles are known for requested decomposition method
Compute and accumulate their area
Optimized algorithm recycles previous arc c as current arc a (after first triangle) */
for(int tri_idx=0;tri_idx<tri_nbr;tri_idx++){
idx_a=a_idx[tri_idx];
idx_b=b_idx[tri_idx];
idx_c=c_idx[tri_idx];
if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG %s reports triangle vertices: col_idx=%lu, tri_idx=%d, idx_a=%ld, idx_b=%ld, idx_c=%ld\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,idx_a,idx_b,idx_c);
/* Compute interior angle/great circle arc a for first triangle; subsequent triangles recycle previous arc c */
if(tri_idx == 0){
/* 20150831: Test by computing ncol=0 area in conus chevrons grid, compare to MAT results
ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/257x512_SCRIP.20150901.nc -m ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.20150901.nc
ncremap -s ${DATA}/grids/257x512_SCRIP.20150901.nc -g ${DATA}/grids/conusx4v1np4_chevrons_scrip_c150815.nc -m ${DATA}/maps/map_fv257x512_to_conusx4v1np4_chevrons_bilin.20150901.nc
ncks -O -D 5 -v FSNT --map ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.150418.nc ${DATA}/ne30/raw/famipc5_ne30_v0.3_00003.cam.h0.1979-01.nc ${DATA}/ne30/rgr/fv_FSNT.nc
ncks -O -D 5 -v FSNT --rgr diagnose_area --map ${DATA}/maps/map_fv257x512_to_conusx4v1np4_chevrons_bilin.20150901.nc ${DATA}/ne30/rgr/fv_FSNT.nc ${DATA}/ne30/rgr/dogfood.nc
ncks -O -D 1 --rgr infer#diagnose_area --rgr grid=${HOME}/grd.nc ${DATA}/ne30/rgr/dogfood.nc ~/foo.nc
ncks -H -s %20.15e, -v area -d ncol,0 ${DATA}/ne30/rgr/dogfood.nc
ncks -H -s %20.15e, -v grid_area -d grid_size,0 ${DATA}/grids/conusx4v1np4_chevrons_scrip_c150815.nc
ncks -H -s %20.15e, -v grid_area -d grid_size,0 ${HOME}/grd.nc
ncol=0 on conus chevrons file:
3.653857995295246e-05 raw GLL weight
3.653857995294305e-05 ESMF weight (area_b from map-file)
3.653857995294302e-05 matlab CSZ decomposition (N-2 triangles) computed at SNL by MAT
3.653857995294301e-05 matlab centroidal decomposition (N triangles) computed at SNL by MAT
3.653857995294258e-05 NCO CSZ _and_ centroidal decompositions (new haversine)
3.653857995289623e-05 NCO CSZ decomposition (old acos)
20191011: Tested this same polygon in ESMF and NCO weight-generator
NCO maps begin with first destination gridcell, find next ESMF gridcell by searching for first col:
ncks --trd -C -v col ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc | egrep "=1 "
ncks -H --trd -s %20.15e -C -d n_b,0 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc
3.653857995294305e-05
ncks -H --trd -s '%20.15e, ' -C -d n_b,0 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc
3.653857995295246e-05
ESMF and NCO weight-generators produce nearly identical S results to double-precision:
ncks -H --trd -s '%20.15e, ' -C -d n_s,0,1 -v S ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc
2.181999640069480e-03, 1.309571213636605e-02
ncks -H --trd -s %20.15e -C -d n_s,207436 -d n_s,209617 -v S ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc
2.181999640069454e-03, 1.309571213636510e-02
Compare first five polygon areas:
ncks --trd -H -C -s '%20.15e, ' -d n_b,0,4 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc
ncks --trd -H -C -s '%20.15e, ' -d n_b,0,4 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc
3.653857995294305e-05, 1.250459284052488e-04, 1.448204605591709e-04, 8.223598867312266e-05, 8.585831933875070e-05, # aave
3.653857995294258e-05, 1.250459284052470e-04, 1.448204605591675e-04, 8.223598867312247e-05, 8.585831933875186e-05,
Compare total areas:
ncwa -O -y ttl -v area.? ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc ~/foo_aave.nc
ncwa -O -y ttl -v area.? ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc ~/foo_nco.nc
ncks --trd -H -C -s '%20.15e, ' -v area.? ~/foo_aave.nc
ncks --trd -H -C -s '%20.15e, ' -v area.? ~/foo_nco.nc
aave: 1.256637061435867e+01, 1.256637061435973e+01
nco: 1.256637061435857e+01, 1.256637061435955e+01
4*pi: 1.25663706143591729538e+01
Does (tru_glb_ttl/NCO_glb_ttl)*NCO_lcl = ESMF_lcl ?
(1.25663706143591729538/1.256637061435857)*3.653857995294258=3.6538579952944333
No, normalization alone does not explain differences between ESMF and NCO
It does not appear that ESMF does a global normalization of areas/weights */
/* Computing great circle arcs over small arcs requires care since central angle is near 0 degrees
Cosine small angles changes slowly for such angles, and leads to precision loss
Use haversine formula instead of spherical law of cosines formula
https://en.wikipedia.org/wiki/Great-circle_distance */
/* Interior angle/great circle arc a, spherical law of cosines formula (loses precision):
cos_a=lat_bnd_cos[idx_a]*lon_bnd_cos[idx_a]*lat_bnd_cos[idx_b]*lon_bnd_cos[idx_b]+
lat_bnd_cos[idx_a]*lon_bnd_sin[idx_a]*lat_bnd_cos[idx_b]*lon_bnd_sin[idx_b]+
lat_bnd_sin[idx_a]*lat_bnd_sin[idx_b];ngl_a=acos(cos_a); */
/* Interior angle/great circle arc a, haversine formula: */
// 20160918: Use branch cut rules for longitude
lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_a],lon_bnd_rdn[idx_b]));
lat_dlt=fabs(lat_bnd_rdn[idx_a]-lat_bnd_rdn[idx_b]);
sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_a]*lat_bnd_cos[idx_b]*pow(sin(0.5*lon_dlt),2));
ngl_a=2.0*asin(sin_hlf_tht);
}else{ /* !tri_idx == 0 */
ngl_a=ngl_c;
} /* !tri_idx == 0 */
/* Interior angle/great circle arc b */
lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_b],lon_bnd_rdn[idx_c]));
lat_dlt=fabs(lat_bnd_rdn[idx_b]-lat_bnd_rdn[idx_c]);
sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_b]*lat_bnd_cos[idx_c]*pow(sin(0.5*lon_dlt),2));
ngl_b=2.0*asin(sin_hlf_tht);
/* Interior angle/great circle arc c */
lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_c],lon_bnd_rdn[idx_a]));
lat_dlt=fabs(lat_bnd_rdn[idx_c]-lat_bnd_rdn[idx_a]);
sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_c]*lat_bnd_cos[idx_a]*pow(sin(0.5*lon_dlt),2));
ngl_c=2.0*asin(sin_hlf_tht);
/* Semi-perimeter */
prm_smi=0.5*(ngl_a+ngl_b+ngl_c);
/* L'Huilier's formula results in NaN if any side exceeds semi-perimeter
This can occur in needle-shaped triangles due to rounding errors in derived arc lengths a, b, c
20200203: Problematic needles occurs a few dozen times in ne120pg2 -> cmip6 maps
Problematic isoceles triangles are much rarer than problematic needles
Therefore look for needle-issues first, then, if none found, look for isoceles issues
Wikipedia recommends treating ill-conditioned triangles by Side-Angle-Side (SAS) formula
https://en.wikipedia.org/wiki/Spherical_trigonometry
Diagnose needles beforehand and call SAS routines as above to avoid NaN in L'Huilier
Label problematic needle triangles by shortest side, e.g., "flg_sas_a" means (b ~ c) and a ~ 0.0 */
flg_sas_ndl=flg_sas_isc=flg_sas_a=flg_sas_b=flg_sas_c=False;
if(ngl_a > prm_smi){if(ngl_b > ngl_c) flg_sas_c=True; else flg_sas_b=True;} /* a exceeds semi-perimeter */
else if(ngl_b > prm_smi){if(ngl_c > ngl_a) flg_sas_a=True; else flg_sas_c=True;} /* b exceeds semi-perimeter */
else if(ngl_c > prm_smi){if(ngl_a > ngl_b) flg_sas_b=True; else flg_sas_a=True;} /* c exceeds semi-perimeter */
if(flg_sas_a || flg_sas_b || flg_sas_c) flg_sas_ndl=True;
if(!flg_sas_ndl){
/* L'Huilier's formula becomes ill-conditioned when two sides are one half the third side
This occurs for flat, isoceles-shaped triangles
Label problematic isoceles triangles by longest side, e.g., "flg_sas_a" means (b ~ c) ~ 0.5*a */
/* Sensitivity tests on ~20191014 showed that triangular ill-conditioning treatment (i.e., switching to SAS method) does not improve (and may degrade) accuracy for eps_ill_cnd > 1.0e-15 */
const double eps_ill_cnd=1.0e-15; /* [frc] Ill-conditioned tolerance for interior angle/great circle arcs in triangle */
const double eps_ill_cnd_dbl=2.0*eps_ill_cnd; /* [frc] Ill-conditioned tolerance for interior angle/great circle arcs in triangle */
if((fabs(ngl_a-ngl_b) < eps_ill_cnd) && (fabs(ngl_a-0.5*ngl_c) < eps_ill_cnd_dbl)) flg_sas_c=True; /* c is twice a and b */
else if((fabs(ngl_b-ngl_c) < eps_ill_cnd) && (fabs(ngl_b-0.5*ngl_a) < eps_ill_cnd_dbl)) flg_sas_a=True; /* a is twice b and c */
else if((fabs(ngl_c-ngl_a) < eps_ill_cnd) && (fabs(ngl_c-0.5*ngl_b) < eps_ill_cnd_dbl)) flg_sas_b=True; /* b is twice c and a */
if(flg_sas_a || flg_sas_b || flg_sas_c) flg_sas_isc=True;
} /* !flg_sas_ndl */
if(flg_sas_isc || flg_sas_ndl){
/* Compute area using SAS formula */
double cos_hlf_C; /* [frc] Cosine of half of canoncal surface angle C */
//double sin_hlf_C; /* [frc] Sine of half of canoncal surface angle C */
double ngl_sfc_ltr_C; /* [rdn] Canonical surface angle/great circle arc C */
double tan_hlf_a_tan_hlf_b; /* [frc] Product of tangents of one-half of nearly equal canoncal sides */
double xcs_sph_hlf_tan; /* [frc] Tangent of one-half the spherical excess */
/* Transform sides into canonical order for formula where C is surface angle between arcs a and b */
if(flg_sas_c){
ngl_ltr_a=ngl_a;
ngl_ltr_b=ngl_b;
ngl_ltr_c=ngl_c;
} /* !flg_sas_c */
if(flg_sas_a){
ngl_ltr_a=ngl_b;
ngl_ltr_b=ngl_c;
ngl_ltr_c=ngl_a;
} /* !flg_sas_a */
if(flg_sas_b){
ngl_ltr_a=ngl_c;
ngl_ltr_b=ngl_a;
ngl_ltr_c=ngl_b;
} /* !flg_sas_b */
if(flg_sas_ndl && (nco_dbg_lvl_get() >= nco_dbg_scl)) (void)fprintf(stdout,"%s: INFO %s reports col_idx = %li triangle %d is needle-shaped triangle with a side that exceeds semi-perimeter = %0.16e. Eschew L'Huilier's formula for spherical excess to avoid NaN. Could use SAS formula with canonical central interior arc c = %0.16e.\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,prm_smi,ngl_ltr_c);
if(flg_sas_isc && (nco_dbg_lvl_get() >= nco_dbg_scl)) (void)fprintf(stdout,"%s: INFO %s reports col_idx = %li triangle %d is nearly flat isoceles-shaped triangle. Canonical arcs a and b differ by %0.16e. Eschew L'Huilier's formula for spherical excess to avoid low precision. Could use SAS formula.\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,fabs(ngl_ltr_a-ngl_ltr_b));
/* Determine canonical surface angle C
To find any angle given three spherical triangle sides, Wikipedia opines:
"The cosine rule may be used to give the angles A, B, and C but, to avoid ambiguities, the half-angle formulae are preferred."
Half-angle formulae include two applicable variants that yield the sine or cosine of half C
Then C is determined as twice the asin() or acos() function, respectively
For needle-shaped triangles, RHS sin formula is ~ sin^2(0)/sin(a)*sin(b) ~ 0.0
For needle-shaped triangles, RHS cos formula is ~ sin^2(s)/sin(a)*sin(b) ~ 0.5
For flat isoceles triangles, RHS sin formula is ~ sin^2(0)/sin(a)*sin(b) ~ 0.0
For flat isoceles triangles, RHS cos formula is ~ sin(s)*sin(0)/sin(a)*sin(b) ~ 0.0
Use sin formula since both needle- and isoceles-shaped triangles have RHS ~ 0.0 where arcsin() is most precise
20200203: Half-angle sine formula gives NaNs, and half-angle cosine formula works on ne120pg2->cmip. Why?
Adopting cosine formula because it works */
//sin_hlf_C=sqrt(sin(prm_smi-ngl_ltr_a)*sin(prm_smi-ngl_ltr_b)/(sin(ngl_ltr_a)*sin(ngl_ltr_b))); // Half-angle sine formula
cos_hlf_C=sqrt(sin(prm_smi)*sin(prm_smi-ngl_ltr_c)/(sin(ngl_ltr_a)*sin(ngl_ltr_b))); // Half-angle cosine formula
//ngl_sfc_ltr_C=2.0*asin(sin_hlf_C);
ngl_sfc_ltr_C=2.0*acos(cos_hlf_C);
/* SAS formula */
tan_hlf_a_tan_hlf_b=tan(0.5*ngl_ltr_a)*tan(0.5*ngl_ltr_b);
xcs_sph_hlf_tan=tan_hlf_a_tan_hlf_b*sin(ngl_sfc_ltr_C)/(1.0+tan_hlf_a_tan_hlf_b*cos(ngl_sfc_ltr_C));
assert(fabs(xcs_sph_hlf_tan) != M_PI_2);
xcs_sph=2.0*atan(xcs_sph_hlf_tan);
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO SAS area formula for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e). Spherical excess = %0.16e.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c,xcs_sph);
// Single-line version
// xcs_sph=2.0*atan(tan(0.5*ngl_ltr_a)*tan(0.5*ngl_ltr_b)*sin(2.0*acos(sqrt(sin(prm_smi)*sin(prm_smi-ngl_c)/(sin(ngl_a)*sin(ngl_b)))))/(1.0+tan_hlf_a_tan_hlf_b*cos(2.0*acos(sqrt(sin(prm_smi)*sin(prm_smi-ngl_c)/(sin(ngl_a)*sin(ngl_b)))))));
/* Above procedure for problematic needle-shaped and isoceles-shaped triangles degrades statistics
For ne30pg2, ne120pg2 -> cmip, setting area = 0.0 _greatly_ improves area statistics (Why?)
Set spherical excess to zero for problematic needle-shaped and isoceles-shaped triangles */
/* fxm: Make zeroing skinny needles/isoceles-shaped triangle-areas a command-line option? */
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO Setting SAS area = 0.0\n",nco_prg_nm_get());
xcs_sph=0.0;
/* !flg_sas */
}else{
double xcs_sph_qtr_tan; /* [frc] Tangent of one-quarter the spherical excess */
xcs_sph_qtr_tan=sqrt(tan(0.5*prm_smi)*tan(0.5*(prm_smi-ngl_a))*tan(0.5*(prm_smi-ngl_b))*tan(0.5*(prm_smi-ngl_c)));
assert(fabs(xcs_sph_qtr_tan) != M_PI_2);
xcs_sph=4.0*atan(xcs_sph_qtr_tan);
/* 20191014: Aggregate all previous area-related commands into one, gigantic, unreadable, possibly more precise command (tested and it is more obfuscated but not more precise) */
// xcs_sph=4.0*atan(sqrt(tan(0.5*0.5*(ngl_a+ngl_b+ngl_c))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_a))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_b))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_c))));
} /* !flg_sas */
if(isnan(xcs_sph)){
const double eps_ngl_skn=1.0e-13; /* [frc] Angles skinnier than this form needles whose area ~ 0.0 */
/* Categorize reason for NaN */
(void)fprintf(stdout,"%s: WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING\nUnxpected NaN polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e).\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c);
if( /* Side exceeds semi-perimeter */
(ngl_a > prm_smi) ||
(ngl_b > prm_smi) ||
(ngl_c > prm_smi)
){
(void)fprintf(stdout,"%s: WARNING Triangle side exceeds semi-perimeter = %0.16e polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),prm_smi,col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c);
}else if( /* Are angles too skinny? Quite often on ne30pg2, ne120pg2 */
(ngl_a < eps_ngl_skn) ||
(ngl_b < eps_ngl_skn) ||
(ngl_c < eps_ngl_skn)
){
(void)fprintf(stdout,"%s: WARNING Triangle has at least one skinny angles < %g [rdn] for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16f, %0.16f, %0.16f). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),eps_ngl_skn,col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c);
}else if( /* Are two vertices identical to double-precision? Never on ne30pg2, ne120pg2 */
((lat_bnd[idx_a] == lat_bnd[idx_b]) && (lon_bnd[idx_a] == lon_bnd[idx_b])) ||
((lat_bnd[idx_b] == lat_bnd[idx_c]) && (lon_bnd[idx_b] == lon_bnd[idx_c])) ||
((lat_bnd[idx_c] == lat_bnd[idx_a]) && (lon_bnd[idx_c] == lon_bnd[idx_a]))
){
(void)fprintf(stdout,"%s: WARNING Triangle has repeated points for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]);
}else{
(void)fprintf(stdout,"%s: WARNING Triangle area formula yields NaN for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16f, %0.16f, %0.16f). Are points co-linear? Assigned triangle area = 0.0.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c);
} /* !co-linear */
xcs_sph=0.0;
} /* !NaN */
area[col_idx]+=xcs_sph; /* Accumulate spherical triangle area into reported polygon area and adjust below */
area_smc+=xcs_sph; /* Accumulate spherical triangle area into small-circle polygon area and adjust below */
area_ttl+=xcs_sph; /* Accumulate spherical triangle area into spherical polygon area */
area_smc_ttl+=xcs_sph; /* Accumulate spherical triangle area into total polygon area and adjust below */
/* 20160918 from here to end of loop is non-spherical work
20170217: Temporarily turn-off latitude circle diagnostics because Sungduk's POP case breaks them
Canonical latitude-triangle geometry has point A at apex and points B and C at same latitude
ncremap --dbg=1 --alg_typ=nco --grd_src=${DATA}/grids/ne30np4_pentagons.091226.nc --grd_dst=${DATA}/grids/cmip6_180x360_scrip.20181001.nc --map=${DATA}/maps/map_ne30np4_to_cmip6_180x360_nco.20190601.nc
ncremap --dbg=1 -R 'edg_typ=smc' --alg_typ=nco --grd_src=${DATA}/grids/ne30np4_pentagons.091226.nc --grd_dst=${DATA}/grids/cmip6_180x360_scrip.20181001.nc --map=${DATA}/maps/map_ne30np4_to_cmip6_180x360_smc.20190601.nc */
flg_tri_crr_smc=False;
if(lat_bnd_rdn[idx_a] == lat_bnd_rdn[idx_b] ||
lat_bnd_rdn[idx_b] == lat_bnd_rdn[idx_c] ||
lat_bnd_rdn[idx_c] == lat_bnd_rdn[idx_a]){
/* Set flag only if triangle is not degenerate. Degenerate triangles (3 points on a geodesic) have zero area */
if(xcs_sph != 0.0) flg_ply_has_smc=flg_tri_crr_smc=True;
if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG Found small circle triangle with vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]);
} /* endif */
if((edg_typ == nco_edg_smc) && flg_tri_crr_smc){
double ngl_plr; /* [rdn] Polar angle (co-latitude) */
long idx_ltr_a; /* [idx] Point A (apex) of canonical latitude-triangle geometry, 1-D index */
long idx_ltr_b; /* [idx] Point B (base) of canonical latitude-triangle geometry, 1-D index */
long idx_ltr_c; /* [idx] Point C (base) of canonical latitude-triangle geometry, 1-D index */
/* Rotate labels to standard position with vertex A, equi-latitude points B and C */
if(lat_bnd_rdn[idx_a] == lat_bnd_rdn[idx_b]){
idx_ltr_a=idx_c;
idx_ltr_b=idx_a;
idx_ltr_c=idx_b;
ngl_ltr_a=ngl_c;
ngl_ltr_b=ngl_a;
ngl_ltr_c=ngl_b;
ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_a]);
}else if(lat_bnd_rdn[idx_b] == lat_bnd_rdn[idx_c]){
idx_ltr_a=idx_a;
idx_ltr_b=idx_b;
idx_ltr_c=idx_c;
ngl_ltr_a=ngl_a;
ngl_ltr_b=ngl_b;
ngl_ltr_c=ngl_c;
ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_b]);
}else if(lat_bnd_rdn[idx_c] == lat_bnd_rdn[idx_a]){
idx_ltr_a=idx_b;
idx_ltr_b=idx_c;
idx_ltr_c=idx_a;
ngl_ltr_a=ngl_b;
ngl_ltr_b=ngl_c;
ngl_ltr_c=ngl_a;
ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_c]);
}else{
(void)fprintf(stdout,"%s: ERROR latitudes not equal in small circle section. Vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]);
abort();
} /* endif */
/* 20160918: Compute exact area of latitude triangle wedge */
double xpn_x; /* [frc] Expansion parameter */
lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_ltr_b],lon_bnd_rdn[idx_ltr_c]));
assert(lon_dlt != 0.0); // Latitude triangles must have bases with distinct longitudes
if(lon_dlt != M_PI){
/* Normal clause executed for small-circle triangles */
/* Numeric conditioning uncertain. Approaches divide-by-zero when lon_dlt << 1 */
xpn_x=lat_bnd_sin[idx_ltr_b]*(1.0-cos(lon_dlt))/sin(lon_dlt);
assert(fabs(xpn_x) != M_PI_2);
area_smc_crc=2.0*atan(xpn_x);
/* 20170217: Sungduk's POP regrid triggers following abort():
ncremap -D 1 -i ~/pop_g16.nc -d ~/cam_f19.nc -o ~/foo.nc */
//assert(xpn_x >= 0.0);
//if(lat_bnd[idx_ltr_b] > 0.0) area_smc_crc+=-lon_dlt*lat_bnd_sin[idx_ltr_b]; else area_smc_crc+=+lon_dlt*lat_bnd_sin[idx_ltr_b];
area_smc_crc+=-lon_dlt*lat_bnd_sin[idx_ltr_b];
}else{
/* 20200228: Latitude triangles may have bases with longitudes that differ by 180 degrees
Consider a quadrilateral with four equidistant vertices in longitude, and that caps a pole:
CSZ decomposition technique divides this into two triangles each with three co-latitudinal points and no vertex at pole
Solution candidates:
1. Divide such quadrilaterals using centroid technique
Just realized current implementation of centroid decomposition fails on polar caps
Failure occurs because centroid latitude is +/- ~90 not mean of vertices' latitudes
Must impute "pseudo-centroid" with latitude +/- 90 instead of averaging vertex latitudes
Requires testing each polygon to determine if it contains pole <- Too difficult/expensive
2. Assume latitude triangles whose base is 180 degrees are at pole
Compute area exactly using analytic formula for annular lune */
(void)fprintf(stdout,"%s: INFO longitudes differ by pi in small circle section. Vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_ltr_a],lon_bnd[idx_ltr_a],lat_bnd[idx_ltr_b],lon_bnd[idx_ltr_b],lat_bnd[idx_ltr_c],lon_bnd[idx_ltr_c]);
(void)fprintf(stdout,"%s: DEBUG col_nbr=%lu, bnd_nbr=%d, col_idx=%ld, area=%g. Vertices [0..bnd_nbr-1] in format idx (lat,lon)\n",nco_prg_nm_get(),col_nbr,bnd_nbr,col_idx,xcs_sph);
for(int bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++)
(void)fprintf(stdout,"%2d (%g, %g)\n",bnd_idx,lat_bnd[bnd_nbr*col_idx+bnd_idx],lon_bnd[bnd_nbr*col_idx+bnd_idx]);
(void)fprintf(stdout,"%s: INFO Assuming this triangle is decomposed from polar cap polygon. Treating area with analytic formula for annular lune\n",nco_prg_nm_get());
      /* Compute small circle correction as difference between spherical triangle area and standard annular lune formula
Small circle correction is positive-definite for polar triangles so use fabs(sin(lat_bnd_sin)) */
area_smc_crc=lon_dlt*fabs(lat_bnd_sin[idx_ltr_b])-area_smc;
} /* !lon_dlt */
// Adjust diagnostic areas by small-circle area correction
area_smc+=area_smc_crc;
area_smc_ttl+=area_smc_crc;
area_smc_crc_ttl+=area_smc_crc;
area_smc_crc_abs_ttl+=fabs(area_smc_crc);
// 20200109: Adjust area reported to calling code by small-circle area correction
area[col_idx]+=area_smc_crc;
if(0){
/* 20160918: Approximate area of latitude triangle wedge. Use truncated power expansion of exact formula. */
double xpn_x_sqr; /* [frc] Expansion parameter squared */
double xpn_sum; /* [frc] Expansion sum */
double xpn_nmr; /* [frc] Expansion term numerator */
double xpn_trm; /* [frc] Expansion term */
double xpn_dnm; /* [frc] Expansion term denominator */
const unsigned short int rdr_xpn=3; /* [nbr] Order of N in trigonometric series expansion */
unsigned short int idx_xpn; /* [idx] Index in series expansion */
xpn_x=cos(ngl_plr)*(1.0-cos(lon_dlt))/sin(lon_dlt);
xpn_x_sqr=xpn_x*xpn_x;
xpn_nmr=xpn_x;
xpn_dnm=1.0;
xpn_trm=xpn_nmr/xpn_dnm;
xpn_sum+=xpn_trm;
for(idx_xpn=3;idx_xpn<=rdr_xpn;idx_xpn+=2){
xpn_nmr*=xpn_x_sqr;
xpn_dnm*=(idx_xpn-1)*idx_xpn;
xpn_trm=xpn_nmr/xpn_dnm;
xpn_sum+=xpn_trm;
} /* !idx_xpn */
(void)fprintf(stdout,"%s: Small-circle area using series approximation...not implemented yet\n",nco_prg_nm_get());
} /* !0 */
if(nco_dbg_lvl_get() >= nco_dbg_scl){
(void)fprintf(stdout,"%s: INFO %s col_idx = %li triangle %d spherical area, latitude-triangle area, %% difference: %g, %g, %g%%\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,xcs_sph,xcs_sph+area_smc_crc,100.0*area_smc_crc/xcs_sph);
if(fabs(area_smc_crc/xcs_sph) > 0.1){
(void)fprintf(stdout,"%s: DEBUG Non-spherical correction exceeds 10%% for current triangle with vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_ltr_a],lon_bnd[idx_ltr_a],lat_bnd[idx_ltr_b],lon_bnd[idx_ltr_b],lat_bnd[idx_ltr_c],lon_bnd[idx_ltr_c]);
} /* !fabs */
} /* !dbg */
} /* !edg_typ && flg_tri_crr_smc */
} /* !tri_idx */
if(edg_typ == nco_edg_smc && flg_ply_has_smc){
/* Current gridcell contained at least one latitude-triangle */
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO %s col_idx = %li spherical area, small circle area, %% difference: %g, %g, %g%%\n",nco_prg_nm_get(),fnc_nm,col_idx,area[col_idx],area_smc,100.0*(area_smc-area[col_idx])/area[col_idx]);
} /* !edg_typ && !flg_ply_has_smc */
} /* !col_idx */
if(edg_typ == nco_edg_smc && nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO %s total spherical area, small circle area, %% difference, crc_ttl, crc_abs_ttl: %g, %g, %g%%, %g, %g\n",nco_prg_nm_get(),fnc_nm,area_ttl,area_smc_ttl,100.0*(area_smc_ttl-area_ttl)/area_ttl,area_smc_crc_ttl,area_smc_crc_abs_ttl);
if(vrt_vld) vrt_vld=(long *)nco_free(vrt_vld);
if(a_idx) a_idx=(long *)nco_free(a_idx);
if(b_idx) b_idx=(long *)nco_free(b_idx);
if(c_idx) c_idx=(long *)nco_free(c_idx);
if(lat_bnd_rdn) lat_bnd_rdn=(double *)nco_free(lat_bnd_rdn);
if(lon_bnd_rdn) lon_bnd_rdn=(double *)nco_free(lon_bnd_rdn);
if(lat_bnd_cos) lat_bnd_cos=(double *)nco_free(lat_bnd_cos);
if(lon_bnd_cos) lon_bnd_cos=(double *)nco_free(lon_bnd_cos);
if(lat_bnd_sin) lat_bnd_sin=(double *)nco_free(lat_bnd_sin);
if(lon_bnd_sin) lon_bnd_sin=(double *)nco_free(lon_bnd_sin);
} /* !nco_sph_plg_area() */
int /* O [enm] Return code */
nco_rgr_tps /* [fnc] Regrid using TempestRemap library */
(rgr_sct * const rgr) /* I/O [sct] Regridding structure */
{
  /* Purpose: Regrid fields using TempestRemap "library" (more precisely, executables)
     Routine was originally written to call Tempest executables
     However, that functionality was all placed into the ncremap shell script
     Thus this C-interface is currently unused
     TempestRemap2 has a library that may be accessed on-line
     Test Tempest library: no way to activate yet
     export DATA_TEMPEST='/data/zender/rgr';ncks -O --rgr=Y ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc */

  const char fnc_nm[]="nco_rgr_tps()";

  const char *cmd_rgr_fmt; /* [sng] printf()-style format string for TempestRemap command */

  char *cmd_rgr; /* [sng] Fully-expanded TempestRemap shell command */
  char fl_grd_dst[]="/tmp/foo_outRLLMesh.g";
  char *fl_grd_dst_cdl;

  int rcd_sys;
  int lat_nbr_rqs=180;
  int lon_nbr_rqs=360;

  size_t cmd_rgr_lng; /* [nbr] Length of fully-expanded regridding command, excluding NUL */

  nco_rgr_tps_cmd nco_tps_cmd; /* [enm] TempestRemap command enum */

  char *nvr_DATA_TEMPEST; /* [sng] Directory where Tempest grids, meshes, and weights are stored */
  nvr_DATA_TEMPEST=getenv("DATA_TEMPEST");
  rgr->drc_tps= (nvr_DATA_TEMPEST && strlen(nvr_DATA_TEMPEST) > 0L) ? (char *)strdup(nvr_DATA_TEMPEST) : (char *)strdup("/tmp");

  if(nco_dbg_lvl_get() >= nco_dbg_crr){
    (void)fprintf(stderr,"%s: INFO %s reports\n",nco_prg_nm_get(),fnc_nm);
    (void)fprintf(stderr,"drc_tps = %s, ",rgr->drc_tps ? rgr->drc_tps : "NULL");
    (void)fprintf(stderr,"\n");
  } /* endif dbg */

  /* Allow for whitespace characters in fl_grd_dst
     Assume CDL translation results in acceptable name for shell commands */
  fl_grd_dst_cdl=nm2sng_fl(fl_grd_dst);

  /* Construct and execute regridding command */
  nco_tps_cmd=nco_rgr_GenerateRLLMesh;
  cmd_rgr_fmt=nco_tps_cmd_fmt_sng(nco_tps_cmd);
  /* Bugfix: previous sizing arithmetic, strlen(fmt)+strlen(dst)-fmt_chr_nbr+1, assumed each
     conversion specifier expands to zero bytes. Each "%d" occupies two bytes in the format yet
     expands to three digits here (180, 360), so sprintf() overran the buffer by two bytes.
     Instead query snprintf() (C99) for the exact expanded length, then allocate */
  cmd_rgr_lng=(size_t)snprintf(NULL,(size_t)0,cmd_rgr_fmt,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst_cdl);
  cmd_rgr=(char *)nco_malloc((cmd_rgr_lng+1UL)*sizeof(char));
  if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stderr,"%s: %s reports generating %d by %d RLL mesh in %s...\n",nco_prg_nm_get(),fnc_nm,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst);
  (void)snprintf(cmd_rgr,cmd_rgr_lng+1UL,cmd_rgr_fmt,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst_cdl);
  rcd_sys=system(cmd_rgr);
  if(rcd_sys == -1){
    (void)fprintf(stdout,"%s: ERROR %s unable to complete TempestRemap regridding command \"%s\"\n",nco_prg_nm_get(),fnc_nm,cmd_rgr);
    nco_exit(EXIT_FAILURE);
  } /* end if */
  if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"done\n");

  /* Clean-up memory */
  if(fl_grd_dst_cdl) fl_grd_dst_cdl=(char *)nco_free(fl_grd_dst_cdl);
  if(cmd_rgr) cmd_rgr=(char *)nco_free(cmd_rgr);

  return NCO_NOERR;
} /* end nco_rgr_tps() */
const char * /* O [sng] String describing two-dimensional grid-type */
nco_grd_2D_sng /* [fnc] Convert two-dimensional grid-type enum to string */
(const nco_grd_2D_typ_enm nco_grd_2D_typ) /* I [enm] Two-dimensional grid-type enum */
{
  /* Purpose: Map each recognized 2D grid-type enum onto its descriptive string */
  if(nco_grd_2D_typ == nco_grd_2D_unk) return "Unknown, unclassified, or unrepresentable 2D grid type (e.g., unstructured, curvilinear, POP displaced-pole)";
  if(nco_grd_2D_typ == nco_grd_2D_gss) return "Gaussian latitude grid. Used by spectral transform models, e.g., CCM 1-3, CAM 1-3, ECMWF Forecast, LSM, MATCH, NCEP (R1, R2), UCICTM.";
  if(nco_grd_2D_typ == nco_grd_2D_fv) return "Cap-latitude grid, aka FV-scalar grid (in Lin-Rood representation). When global (not regional) in extent and with odd number of latitudes, poles are considered at (and labeled as) centers of first and last gridcells. For example lat_ctr=-90,-89,-88,... and lat_crn=-89.5,-88.5,-87.5,... Thus pole-gridcells span half the equi-angular latitude increment of the rest of the grid. Used by CAM FV (i.e., CAM 4-6), ECMWF (ERA-I, ERA40, ERA5), GEOS-CHEM, UCICTM, UKMO.";
  if(nco_grd_2D_typ == nco_grd_2D_eqa) return "Uniform/Equi-Angular latitude grid. Uniform/Equi-angle (everywhere) latitude grid. When global (not regional) in extent and with even number of latitudes, poles are at corners/edges of first and last gridcells. For example lat_ctr=-89.5,-88.5,-87.5,... and lat_crn=-90,-89,-88,.... When global, forms valid FV-staggered (aka FV-velocity, aka offset) grid (for Lin-Rood representation). Used by CIESIN/SEDAC, IGBP-DIS, NASA CMG, TOMS AAI, WOCE.";
  /* Unrecognized enum values are an internal error */
  nco_dfl_case_generic_err();
  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* end nco_grd_2D_sng() */
const char * /* O [sng] String describing latitude grid-type */
nco_grd_lat_sng /* [fnc] Convert latitude grid-type enum to string */
(const nco_grd_lat_typ_enm nco_grd_lat_typ) /* I [enm] Latitude grid-type enum */
{
  /* Purpose: Map each recognized latitude grid-type enum onto its descriptive string */
  if(nco_grd_lat_typ == nco_grd_lat_unk) return "Unknown, unclassified, or unrepresentable latitude grid type (e.g., unstructured, curvilinear, POP3)";
  if(nco_grd_lat_typ == nco_grd_lat_gss) return "Gaussian latitude grid used by global spectral models: CCM 1-3, CAM 1-3, ECMWF Forecast, LSM, MATCH, NCEP (R1, R2), UCICTM.";
  if(nco_grd_lat_typ == nco_grd_lat_fv) return "Cap-latitude grid, aka FV-scalar grid (in Lin-Rood representation). When global (not regional) in extent and with odd number of latitudes, poles are considered at (and labeled as) centers of first and last gridcells. For example lat_ctr=-90,-89,-88,... and lat_crn=-89.5,-88.5,-87.5,... Thus pole-gridcells span half the equi-angular latitude increment of the rest of the grid. Used by CAM FV (i.e., CAM 4-6), ECMWF (ERA-I, ERA40, ERA5), GEOS-CHEM, UCICTM, UKMO.";
  if(nco_grd_lat_typ == nco_grd_lat_eqa) return "Uniform/Equi-Angular latitude grid. Uniform/Equi-angle (everywhere) latitude grid. When global (not regional) in extent and with even number of latitudes, poles are at corners/edges of first and last gridcells. For example lat_ctr=-89.5,-88.5,-87.5,... and lat_crn=-90,-89,-88,.... When global, forms valid FV-staggered (aka FV-velocity, aka offset) grid (for Lin-Rood representation). Used by CIESIN/SEDAC, IGBP-DIS, NASA CMG, TOMS AAI, WOCE.";
  /* Unrecognized enum values are an internal error */
  nco_dfl_case_generic_err();
  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* end nco_grd_lat_sng() */
const char * /* O [sng] String describing longitude grid-type */
nco_grd_lon_sng /* [fnc] Convert longitude grid-type enum to string */
(const nco_grd_lon_typ_enm nco_grd_lon_typ) /* I [enm] Longitude grid-type enum */
{
  /* Purpose: Map each recognized longitude grid-type enum onto its descriptive string */
  if(nco_grd_lon_typ == nco_grd_lon_unk) return "Unknown, unclassified, or unrepresentable longitude grid type (e.g., unstructured, curvilinear)";
  if(nco_grd_lon_typ == nco_grd_lon_180_wst) return "Date line at west edge of first longitude cell";
  if(nco_grd_lon_typ == nco_grd_lon_180_ctr) return "Date line at center of first longitude cell";
  if(nco_grd_lon_typ == nco_grd_lon_Grn_wst) return "Greenwich at west edge of first longitude cell";
  if(nco_grd_lon_typ == nco_grd_lon_Grn_ctr) return "Greenwich at center of first longitude cell";
  if(nco_grd_lon_typ == nco_grd_lon_bb) return "Longitude grid determined by bounding box (lon_wst/lon_est) and gridcell number (lon_nbr)";
  /* Unrecognized enum values are an internal error */
  nco_dfl_case_generic_err();
  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* end nco_grd_lon_sng() */
const char * /* O [sng] String describing grid extent */
nco_grd_xtn_sng /* [fnc] Convert two-dimensional grid-extent enum to string */
(const nco_grd_xtn_enm nco_grd_xtn) /* I [enm] Grid-extent enum */
{
  /* Purpose: Map each recognized grid-extent enum onto its descriptive string */
  if(nco_grd_xtn == nco_grd_xtn_nil) return "Unknown";
  if(nco_grd_xtn == nco_grd_xtn_glb) return "Global";
  if(nco_grd_xtn == nco_grd_xtn_rgn) return "Regional";
  /* Unrecognized enum values are an internal error */
  nco_dfl_case_generic_err();
  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* end nco_grd_xtn_sng() */
const char * /* O [sng] String describing grid conversion */
nco_rgr_grd_sng /* [fnc] Convert grid conversion enum to string */
(const nco_rgr_typ_enm nco_rgr_typ) /* I [enm] Grid conversion enum */
{
  /* Purpose: Map each recognized grid-conversion enum onto its descriptive string */
  if(nco_rgr_typ == nco_rgr_grd_1D_to_1D) return "1D_to_1D";
  if(nco_rgr_typ == nco_rgr_grd_1D_to_2D) return "1D_to_2D";
  if(nco_rgr_typ == nco_rgr_grd_2D_to_1D) return "2D_to_1D";
  if(nco_rgr_typ == nco_rgr_grd_2D_to_2D) return "2D_to_2D";
  /* Unrecognized enum values are an internal error */
  nco_dfl_case_generic_err();
  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* end nco_rgr_grd_sng() */
const char * /* O [sng] String describing regridding method */
nco_rgr_mth_sng /* [fnc] Convert regridding method enum to string */
(const nco_rgr_mth_typ_enm nco_rgr_mth_typ) /* I [enm] Regridding method enum */
{
  /* Purpose: Map each recognized regridding-method enum onto its descriptive string */
  if(nco_rgr_mth_typ == nco_rgr_mth_conservative) return "Conservative remapping";
  if(nco_rgr_mth_typ == nco_rgr_mth_bilinear) return "Bilinear remapping";
  if(nco_rgr_mth_typ == nco_rgr_mth_none) return "none";
  if(nco_rgr_mth_typ == nco_rgr_mth_unknown) return "Unknown (TempestRemap or ESMF_weight_only)";
  /* Unrecognized enum values are an internal error */
  nco_dfl_case_generic_err();
  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* end nco_rgr_mth_sng() */
const char * /* O [sng] String describing mapfile generator */
nco_rgr_mpf_sng /* [fnc] Convert mapfile generator enum to string */
(const nco_rgr_mpf_typ_enm nco_rgr_mpf_typ) /* I [enm] Mapfile generator enum */
{
  /* Purpose: Map each recognized mapfile-generator enum onto its descriptive string */
  if(nco_rgr_mpf_typ == nco_rgr_mpf_ESMF) return "ESMF Offline Regridding Weight Generator (ERWG), either from ESMF_RegridWeightGen directly or via NCL";
  if(nco_rgr_mpf_typ == nco_rgr_mpf_SCRIP) return "SCRIP (original LANL package)";
  if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest) return "TempestRemap (GenerateOfflineMap)";
  if(nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) return "ESMF Offline Regridding Weight Generator (ERWG), either from ESMF_RegridWeightGen directly or via NCL, with --weight_only option from ERWG 7.1+";
  if(nco_rgr_mpf_typ == nco_rgr_mpf_NCO) return "netCDF Operators (NCO) Offline Regridding Weight Generator";
  if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR) return "MOAB-TempestRemap Online Regridding Weight Generator";
  if(nco_rgr_mpf_typ == nco_rgr_mpf_unknown) return "Unknown Weight Generator";
  /* Unrecognized enum values are an internal error */
  nco_dfl_case_generic_err();
  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* end nco_rgr_mpf_sng() */
const char * /* O [sng] String describing regridding normalization */
nco_rgr_nrm_sng /* [fnc] Convert regridding normalization enum to string */
(const nco_rgr_nrm_typ_enm nco_rgr_nrm_typ) /* I [enm] Regridding normalization enum */
{
  /* Purpose: Map each recognized normalization enum onto its descriptive string */
  if(nco_rgr_nrm_typ == nco_rgr_nrm_fracarea) return "fracarea";
  if(nco_rgr_nrm_typ == nco_rgr_nrm_destarea) return "destarea";
  if(nco_rgr_nrm_typ == nco_rgr_nrm_none) return "none";
  if(nco_rgr_nrm_typ == nco_rgr_nrm_unknown) return "Unknown (possibilities include ESMF_weight_only, NCO, and TempestRemap)";
  /* Unrecognized enum values are an internal error */
  nco_dfl_case_generic_err();
  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* end nco_rgr_nrm_sng() */
const char * /* O [sng] String containing regridding command and format */
nco_tps_cmd_fmt_sng /* [fnc] Convert TempestRemap command enum to command string */
(const nco_rgr_tps_cmd nco_tps_cmd) /* I [enm] TempestRemap command enum */
{
  /* Purpose: Map each TempestRemap command enum onto its printf()-style invocation template
     Templates with conversion specifiers are expanded by callers (e.g., nco_rgr_tps()) */
  if(nco_tps_cmd == nco_rgr_ApplyOfflineMap) return "ApplyOfflineMap";
  if(nco_tps_cmd == nco_rgr_CalculateDiffNorms) return "CalculateDiffNorms";
  if(nco_tps_cmd == nco_rgr_GenerateCSMesh) return "GenerateCSMesh --res %d --file %s";
  if(nco_tps_cmd == nco_rgr_GenerateGLLMetaData) return "GenerateGLLMetaData";
  if(nco_tps_cmd == nco_rgr_GenerateICOMesh) return "GenerateICOMesh";
  if(nco_tps_cmd == nco_rgr_GenerateLambertConfConicMesh) return "GenerateLambertConfConicMesh";
  if(nco_tps_cmd == nco_rgr_GenerateOfflineMap) return "GenerateOfflineMap --in_mesh %s --out_mesh %s --ov_mesh %s --in_data %s --out_data %s";
  if(nco_tps_cmd == nco_rgr_GenerateOverlapMesh) return "GenerateOverlapMesh --a %s --b %s --out %s";
  if(nco_tps_cmd == nco_rgr_GenerateRLLMesh) return "GenerateRLLMesh --lat %d --lon %d --file %s";
  if(nco_tps_cmd == nco_rgr_GenerateTestData) return "GenerateTestData --mesh %s --np %d --test %d --out %s";
  if(nco_tps_cmd == nco_rgr_MeshToTxt) return "MeshToTxt";
  /* nco_rgr_AAA_nil, nco_rgr_ZZZ_last, and unrecognized values are all internal errors */
  nco_dfl_case_generic_err();
  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* end nco_tps_cmd_fmt_sng() */
const char * /* O [sng] String containing regridding command name */
nco_tps_cmd_sng /* [fnc] Convert TempestRemap command enum to command name */
(const nco_rgr_tps_cmd nco_tps_cmd) /* I [enm] TempestRemap command enum */
{
  /* Purpose: Map each TempestRemap command enum onto its bare executable name */
  if(nco_tps_cmd == nco_rgr_ApplyOfflineMap) return "ApplyOfflineMap";
  if(nco_tps_cmd == nco_rgr_CalculateDiffNorms) return "CalculateDiffNorms";
  if(nco_tps_cmd == nco_rgr_GenerateCSMesh) return "GenerateCSMesh";
  if(nco_tps_cmd == nco_rgr_GenerateGLLMetaData) return "GenerateGLLMetaData";
  if(nco_tps_cmd == nco_rgr_GenerateICOMesh) return "GenerateICOMesh";
  if(nco_tps_cmd == nco_rgr_GenerateLambertConfConicMesh) return "GenerateLambertConfConicMesh";
  if(nco_tps_cmd == nco_rgr_GenerateOfflineMap) return "GenerateOfflineMap";
  if(nco_tps_cmd == nco_rgr_GenerateOverlapMesh) return "GenerateOverlapMesh";
  if(nco_tps_cmd == nco_rgr_GenerateRLLMesh) return "GenerateRLLMesh";
  if(nco_tps_cmd == nco_rgr_GenerateTestData) return "GenerateTestData";
  if(nco_tps_cmd == nco_rgr_MeshToTxt) return "MeshToTxt";
  /* nco_rgr_AAA_nil, nco_rgr_ZZZ_last, and unrecognized values are all internal errors */
  nco_dfl_case_generic_err();
  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* end nco_tps_cmd_sng() */
int /* O [enm] Return code */
nco_grd_mk /* [fnc] Create SCRIP-format grid file */
(rgr_sct * const rgr) /* I/O [sct] Regridding structure */
{
/* Purpose: Use grid information to create SCRIP-format grid file
Spherical geometry terminology:
spherical cap = spherical dome = volume cut-off by plane
spherical lune = digon = area bounded by two half-great circles = base of spherical wedge
spherical segment = volume defined by cutting sphere with pair parallel planes
spherical sector = volume subtended by lat1
spherical wedge = ungula = volume subtended by lon2-lon1
spherical zone = area of spherical segment excluding bases
spherical quadrangle = area of intersection of spherical zone and lune (i.e., area of
bearing = angle from true north
geodesic = shortest path between points on a surface
great circle = orthodrome = "straight path" = geodesic of the sphere
convergency = difference (in azimuth?) between great circle tracks at two different positions
conversion angle = angle between geodesic and rhumb line
rhumb line = loxodrome = "oblique (or slanted) path" = line of constant azimuth
Formulae:
http://www.movable-type.co.uk/scripts/latlong.html # On-line Javascript implementation
http://williams.best.vwh.net/avform.htm
ACME:
https://acme-svn2.ornl.gov/acme-repo/acme/mapping/grids
https://acme-svn2.ornl.gov/acme-repo/acme/inputdata/cpl/gridmaps
NCAR:
yellowstone.ucar.edu:/glade/p/cesm/cseg/mapping/grids
yellowstone.ucar.edu:/glade/p_old/cesm/cseg/mapping/grids
Global RLL grids:
ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr grid=${DATA}/grids/180x360_SCRIP.20150901.nc --rgr latlon=180,360 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc
ncks -O -D 1 --rgr ttl='Equiangular grid 90x180' --rgr grid=${DATA}/grids/90x180_SCRIP.20150901.nc --rgr latlon=90,180 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc
Maps for global RLL grids:
ESMF_RegridWeightGen -s ${DATA}/grids/180x360_SCRIP.20150901.nc -d ${DATA}/grids/90x180_SCRIP.20150901.nc -w ${DATA}/maps/map_180x360_to_90x180.20150901.nc --method conserve
ESMF_RegridWeightGen -s ${DATA}/grids/90x180_SCRIP.20150901.nc -d ${DATA}/grids/180x360_SCRIP.20150901.nc -w ${DATA}/maps/map_90x180_to_180x360.20150901.nc --method conserve
ACME grids:
ncks -O -D 1 --rgr ttl='FV-scalar grid 129x256' --rgr grid=${DATA}/grids/129x256_SCRIP.20150910.nc --rgr latlon=129,256 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc
ncks -O -D 1 --rgr ttl='FV-scalar grid 257x512' --rgr grid=${DATA}/grids/257x512_SCRIP.20150910.nc --rgr latlon=257,512 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc
ncks -O -D 1 --rgr ttl='FV-scalar grid 801x1600' --rgr grid=${DATA}/grids/801x1600_SCRIP.20150910.nc --rgr latlon=801,1600 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc
ACME maps:
ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/129x256_SCRIP.20150910.nc -w ${DATA}/maps/map_ne30np4_to_fv129x256_aave.20150910.nc --method conserve
ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/257x512_SCRIP.20150910.nc -w ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.20150910.nc --method bilinear
ESMF_RegridWeightGen -s ${DATA}/grids/ne120np4_pentagons.100310.nc -d ${DATA}/grids/257x512_SCRIP.20150910.nc -w ${DATA}/maps/map_ne120np4_to_fv257x512_aave.20150910.nc --method conserve
ESMF_RegridWeightGen -s ${DATA}/grids/ne120np4_pentagons.100310.nc -d ${DATA}/grids/801x1600_SCRIP.20150910.nc -w ${DATA}/maps/map_ne120np4_to_fv801x1600_bilin.20150910.nc --method bilinear
AMWG grids: AMWG diagnostics (until ~2016) mis-diagnose FV grids with odd numbers of latitudes as Gaussian Grids
ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 96x144 for horizontal resolution 1.9x2.5 degrees' --rgr grid=${DATA}/grids/96x144_SCRIP.20160301.nc --rgr latlon=96,144 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc
ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 192x288 for horizontal resolution 0.9x1.25 degrees' --rgr grid=${DATA}/grids/192x288_SCRIP.20160301.nc --rgr latlon=192,288 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc
ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 128x256 for horizontal resolution 1.4x1.4 degrees' --rgr grid=${DATA}/grids/128x256_SCRIP.20160301.nc --rgr latlon=128,256 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc
ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 256x512 for horizontal resolution 0.7x0.7 degrees' --rgr grid=${DATA}/grids/256x512_SCRIP.20160301.nc --rgr latlon=256,512 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc
ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 800x1600 for horizontal resolution 0.225x0.225 degrees' --rgr grid=${DATA}/grids/800x1600_SCRIP.20160301.nc --rgr latlon=800,1600 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc
ncks -O -D 1 --rgr ttl='Equiangular grid 360x720 produced by RTM' --rgr grid=${DATA}/grids/360x720rtm_SCRIP.20160301.nc --rgr latlon=360,720 --rgr lat_typ=eqa --rgr lon_typ=180_wst ~/nco/data/in.nc ~/foo.nc
AMWG maps old method (no provenance archived):
ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/128x256_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc --method conserve
ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/256x512_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv256x512_bilin.20160301.nc --method bilinear
ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/256x512_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv256x512_aave.20160301.nc --method conserve
ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/800x1600_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv800x1600_bilin.20160301.nc --method bilinear
AMWG maps with ncremap (preferred method):
ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/128x256_SCRIP.20160301.nc -m ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc -w esmf -a conserve
ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/256x512_SCRIP.20160301.nc -m ${DATA}/maps/map_ne30np4_to_fv256x512_bilin.20160301.nc -w esmf -a bilinear
ncremap -s ${DATA}/grids/ne120np4_pentagons.100310.nc -g ${DATA}/grids/256x512_SCRIP.20160301.nc -m ${DATA}/maps/map_ne120np4_to_fv256x512_aave.20160301.nc -w esmf -a conserve
ncremap -s ${DATA}/grids/ne120np4_pentagons.100310.nc -g ${DATA}/grids/800x1600_SCRIP.20160301.nc -m ${DATA}/maps/map_ne120np4_to_fv800x1600_bilin.20160301.nc -w esmf -a bilinear
MPAS grids:
NCO cannot yet generate MPAS grids, but given an MPAS grid it can generate appropriate maps
MPAS maps:
ncremap -s ${DATA}/grids/oEC60to30.SCRIP.150729.nc -g ${DATA}/grids/t62_SCRIP.20150901.nc -m ${DATA}/maps/map_oEC60to30_to_t62_aave.20160301.nc -w esmf -a conserve
ncremap -s ${DATA}/grids/oEC60to30.SCRIP.150729.nc -g ${DATA}/grids/t62_SCRIP.20150901.nc -m ${DATA}/maps/map_oEC60to30_to_t62_bilin.20160301.nc -w esmf -a bilinear
Regional RLL grids:
ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr grid=${DATA}/sld/rgr/grd_dst.nc --rgr latlon=100,100 --rgr snwe=30.0,70.0,-120.0,-90.0 ~/nco/data/in.nc ~/foo.nc
Global RLL skeleton:
ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr skl=${DATA}/sld/rgr/skl_180x360.nc --rgr grid=${DATA}/grids/180x360_SCRIP.20150901.nc --rgr latlon=180,360#lat_typ=eqa#lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc
Curvilinear grids:
ncks -O -D 1 --rgr ttl='Curvilinear grid 10x20. Degenerate case.' --rgr crv --rgr lon_crv=0.0 --rgr skl=${DATA}/sld/rgr/skl_crv.nc --rgr grid=${DATA}/sld/rgr/grd_crv.nc --rgr latlon=10,20 --rgr snwe=-5.0,5.0,-10.0,10.0 ~/nco/data/in.nc ~/foo.nc
ncks -O -D 1 --rgr ttl='Curvilinear grid 10x20. Curvilinearity = 1.0 lon' --rgr lon_crv=1.0 --rgr skl=${DATA}/sld/rgr/skl_crv.nc --rgr grid=${DATA}/sld/rgr/grd_crv.nc --rgr latlon=10,20 --rgr snwe=-5.0,5.0,-10.0,10.0 ~/nco/data/in.nc ~/foo.nc
1-D Latitude (no longitude) grids:
ncks -O -D 1 --rgr ttl='Latitude-only zonal grid' --rgr skl=${DATA}/sld/rgr/skl_lat_10dgr_uni.nc --rgr grid=${DATA}/sld/rgr/grd_lat_10dgr_uni.nc --rgr latlon=18,1 --rgr snwe=-90,90,0,360 ~/nco/data/in.nc ~/foo.nc
ncks -O -D 1 --rgr ttl='Latitude-only zonal grid' --rgr skl=${DATA}/sld/rgr/skl_lat_05dgr_cap.nc --rgr grid=${DATA}/sld/rgr/grd_lat_05dgr_cap.nc --rgr latlon=37,1 --rgr snwe=-90,90,0,360 ~/nco/data/in.nc ~/foo.nc
ncremap -i ${DATA}/sld/rgr/skl_lat_10dgr_uni.nc -d ${DATA}/sld/rgr/skl_lat_05dgr_cap.nc -m ${DATA}/maps/map_lat10uni_to_lat05cap_aave.nc -o ~/rgr/lat10to05.nc
ESMF_RegridWeightGen -s ${DATA}/sld/rgr/grd_lat_10dgr_uni.nc -d ${DATA}/sld/rgr/grd_lat_05dgr_cap.nc -w ${DATA}/maps/map_lat10uni_to_lat05cap_aave.nc --method conserve */
const char fnc_nm[]="nco_grd_mk()"; /* [sng] Function name */
const double rdn2dgr=180.0/M_PI;
const double dgr2rdn=M_PI/180.0;
const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */
const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */
const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */
const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */
const int itr_nbr_max=20; // [nbr] Maximum number of iterations
const nc_type crd_typ=NC_DOUBLE;
char *fl_out_tmp=NULL_CEWI;
char *fl_out;
char grd_area_nm[]="grid_area"; /* 20150830: NB ESMF_RegridWeightGen --user_areas looks for variable named "grid_area" */
char dmn_sz_nm[]="grid_dims";
char grd_crn_lat_nm[]="grid_corner_lat";
char grd_crn_lon_nm[]="grid_corner_lon";
char grd_crn_nm[]="grid_corners";
char grd_ctr_lat_nm[]="grid_center_lat";
char grd_ctr_lon_nm[]="grid_center_lon";
char grd_rnk_nm[]="grid_rank";
char grd_sz_nm[]="grid_size";
char msk_nm[]="grid_imask";
double *grd_ctr_lat; /* [dgr] Latitude centers of grid */
double *grd_ctr_lon; /* [dgr] Longitude centers of grid */
double *grd_crn_lat; /* [dgr] Latitude corners of grid */
double *grd_crn_lon; /* [dgr] Longitude corners of grid */
double *area; /* [sr] Area of grid */
double *lat_bnd=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular grid */
double *lat_crn=NULL; /* [dgr] Latitude corners of rectangular grid */
double *lat_ctr=NULL_CEWI; /* [dgr] Latitude centers of rectangular grid */
double *lat_ntf=NULL; /* [dgr] Latitude interfaces of rectangular grid */
double *lat_wgt=NULL; /* [dgr] Latitude weights of rectangular grid */
double *lon_bnd=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular grid */
double *lon_crn=NULL; /* [dgr] Longitude corners of rectangular grid */
double *lon_ctr=NULL_CEWI; /* [dgr] Longitude centers of rectangular grid */
double *lon_ntf=NULL; /* [dgr] Longitude interfaces of rectangular grid */
double area_ttl=0.0; /* [frc] Exact sum of area */
double lat_crv; /* [dgr] Latitudinal curvilinearity */
double lon_crv; /* [dgr] Longitudinal curvilinearity */
double lat_nrt; /* [dgr] Latitude of northern edge of grid */
double lat_sth; /* [dgr] Latitude of southern edge of grid */
double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */
double lat_wgt_gss; /* [frc] Latitude weight estimated from interface latitudes */
double lon_est; /* [dgr] Longitude of eastern edge of grid */
double lon_wst; /* [dgr] Longitude of western edge of grid */
double lon_ncr; /* [dgr] Longitude increment */
double lat_ncr; /* [dgr] Latitude increment */
double lon_spn; /* [dgr] Longitude span */
double lat_spn; /* [dgr] Latitude span */
double *wgt_Gss=NULL; // [frc] Gaussian weights double precision
int *msk=NULL; /* [flg] Mask of grid */
int *dmn_sz_int; /* [nbr] Array of dimension sizes of grid */
int dmn_ids[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int fl_out_fmt=NC_FORMAT_CLASSIC; /* [enm] Output file format */
int out_id; /* I [id] Output netCDF file ID */
int rcd=NC_NOERR;
int area_id; /* [id] Area variable ID */
int dmn_id_grd_crn; /* [id] Grid corners dimension ID */
int dmn_id_grd_rnk; /* [id] Grid rank dimension ID */
int dmn_id_grd_sz; /* [id] Grid size dimension ID */
int dmn_sz_int_id; /* [id] Grid dimension sizes ID */
int grd_crn_lat_id; /* [id] Grid corner latitudes variable ID */
int grd_crn_lon_id; /* [id] Grid corner longitudes variable ID */
int grd_ctr_lat_id; /* [id] Grid center latitudes variable ID */
int grd_ctr_lon_id; /* [id] Grid center longitudes variable ID */
int itr_cnt; /* Iteration counter */
int msk_id; /* [id] Mask variable ID */
long dmn_srt[dmn_nbr_grd_max];
long dmn_cnt[dmn_nbr_grd_max];
long bnd_nbr; /* [nbr] Number of bounds in gridcell */
long col_nbr; /* [nbr] Number of columns in grid */
long crn_idx; /* [idx] Counting index for corners */
long grd_crn_nbr; /* [nbr] Number of corners in gridcell */
long grd_rnk_nbr; /* [nbr] Number of dimensions in grid */
long grd_sz_nbr; /* [nbr] Number of gridcells in grid */
long idx2; /* [idx] Counting index for unrolled grids */
long idx; /* [idx] Counting index for unrolled grids */
long lat_idx2; /* [idx] Counting index for unrolled latitude */
long lat_idx;
long lat_nbr; /* [nbr] Number of latitudes in grid */
long lon_idx2; /* [idx] Counting index for unrolled longitude */
long lon_idx;
long lon_nbr; /* [nbr] Number of longitudes in grid */
nco_bool FORCE_APPEND=False; /* Option A */
nco_bool FORCE_OVERWRITE=True; /* Option O */
nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool SHARE_CREATE=rgr->flg_uio; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */
nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */
nco_bool WRT_TMP_FL=False; /* [flg] Write output to temporary file */
nco_bool flg_grd_1D=False;
nco_bool flg_grd_2D=False;
nco_bool flg_grd_crv=False;
nco_bool flg_s2n=True; /* I [enm] Latitude grid-direction is South-to-North */
nco_grd_2D_typ_enm grd_typ; /* [enm] Grid-type enum */
nco_grd_lat_drc_enm lat_drc; /* [enm] Latitude grid-direction enum */
nco_grd_lat_typ_enm lat_typ; /* [enm] Latitude grid-type enum */
nco_grd_lon_typ_enm lon_typ; /* [enm] Longitude grid-type enum */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
dfl_lvl=rgr->dfl_lvl;
grd_typ=rgr->grd_typ; /* [enm] Grid type */
fl_out=rgr->fl_grd;
fl_out_fmt=rgr->fl_out_fmt;
lat_drc=rgr->lat_drc; /* [enm] Latitude grid direction */
lat_typ=rgr->lat_typ; /* [enm] Latitude grid type */
lon_typ=rgr->lon_typ; /* [enm] Longitude grid type */
lat_nbr=rgr->lat_nbr; /* [nbr] Number of latitudes in grid */
lon_nbr=rgr->lon_nbr; /* [nbr] Number of longitudes in grid */
lat_crv=rgr->lat_crv; /* [dgr] Latitude curvilinearity */
lon_crv=rgr->lon_crv; /* [dgr] Longitude curvilinearity */
lat_sth=rgr->lat_sth; /* [dgr] Latitude of southern edge of grid */
lon_wst=rgr->lon_wst; /* [dgr] Longitude of western edge of grid */
lat_nrt=rgr->lat_nrt; /* [dgr] Latitude of northern edge of grid */
lon_est=rgr->lon_est; /* [dgr] Longitude of eastern edge of grid */
/* Use curvilinear coordinates (lat and lon are 2D arrays) if flg_crv already set or if lat_crv or lon_crv set */
if(lat_crv != 0.0 || lon_crv != 0.0 || rgr->flg_crv) flg_grd_crv=True;
if(lat_drc == nco_grd_lat_drc_n2s) flg_s2n=False;
/* Assume 2D grid */
flg_grd_2D=True;
grd_rnk_nbr=dmn_nbr_2D;
/* Assume quadrilaterals */
grd_crn_nbr=4;
/* Assume rectangles */
bnd_nbr=2;
col_nbr=lat_nbr*lon_nbr;
grd_sz_nbr=lat_nbr*lon_nbr;
/* Allocate space for output data */
area=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
dmn_sz_int=(int *)nco_malloc(grd_rnk_nbr*nco_typ_lng((nc_type)NC_INT));
msk=(int *)nco_malloc(grd_sz_nbr*nco_typ_lng((nc_type)NC_INT));
lat_bnd=(double *)nco_malloc(lat_nbr*bnd_nbr*nco_typ_lng(crd_typ));
lat_crn=(double *)nco_malloc(lat_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lat_ctr=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ));
lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
lon_bnd=(double *)nco_malloc(lon_nbr*bnd_nbr*nco_typ_lng(crd_typ));
lon_crn=(double *)nco_malloc(lon_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lon_ctr=(double *)nco_malloc(lon_nbr*nco_typ_lng(crd_typ));
lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ));
wgt_Gss=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
grd_ctr_lat=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
grd_ctr_lon=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
grd_crn_lat=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ));
grd_crn_lon=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ));
/* Define variable values */
int lon_psn=int_CEWI; /* [idx] Ordinal position of longitude in rectangular grid dimension-size array */
int lat_psn=int_CEWI; /* [idx] Ordinal position of latitude in rectangular grid dimension-size array */
if(grd_rnk_nbr == dmn_nbr_2D){
lon_psn=0; /* SCRIP introduced [lon,lat] convention because more natural for Fortran */
lat_psn=1;
} /* !flg_grd_in_2D */
dmn_sz_int[lon_psn]=lon_nbr;
dmn_sz_int[lat_psn]=lat_nbr;
for(idx=0;idx<grd_sz_nbr;idx++) msk[idx]=1;
/* Compute rectangular arrays
NB: Much is a more-generic rewrite of map/map_grd.F90:map_grd_mk() */
/* 20150827:
Old rule: Longitude grid was entirely specified by one of four longitude map tokens: Grn_ctr,Grn_wst,180_ctr,180_wst
New rule: User may specify bounds (lon_wst,lon_est,lat_sth,lat_nrt) independently of grid token
     Such bounds ALWAYS refer to bounding box interface edges, NEVER to centers of first or last gridcells
Bounds and number of gridcells completely determine uniform grid so former longitude-type tokens have no effect when bounds specified (so letting grid-type tokens affect grid would over-determine grid and lead to errors)
     Hence, grid-type tokens may be used as shorthand to specify grids but cannot be assumed to exist later (because regional grids would not have specified them)
Grid grid-type tokens lon_bb/lat_bb imply bounding box was originally used to specify bounds
1x1 degree global grid with first longitude centered at Greenwich:
--lon_nbr=360 --lon_typ Grn_ctr
--lon_nbr=360 --lon_wst=-0.5 --lon_est=359.5
1x1 degree global grid with Greenwich at west edge of first longitude:
--lon_nbr=360 --lon_typ Grn_wst
--lon_nbr=360 --lon_wst=0.0 --lon_est=360.0
1x1 degree regional grid, total size 9x9 degrees, Greenwich at center of middle gridcell:
--lon_nbr=9 --lon_wst=-4.5 --lon_est=4.5
1x1 degree regional grid, total size 10x10 degrees, Greenwich at east/west edges of middle two gridcells
--lon_nbr=10 --lon_wst=-5.0 --lon_est=5.0 */
/* Were east/west longitude bounds set explicitly or implicitly?
NB: This is redundant since it was done in nco_rgr_ini(), yet better safe than sorry */
if(lon_wst != NC_MAX_DOUBLE || lon_est != NC_MAX_DOUBLE) lon_typ=rgr->lon_typ=nco_grd_lon_bb;
if(lon_wst == NC_MAX_DOUBLE){
/* Precomputed longitude grids begin with longitude 0.0 or -180.0 degrees */
switch(lon_typ){
case nco_grd_lon_bb:
case nco_grd_lon_Grn_ctr:
case nco_grd_lon_Grn_wst:
lon_wst=0.0;
break;
case nco_grd_lon_180_ctr:
case nco_grd_lon_180_wst:
lon_wst=-180.0;
break;
default:
nco_dfl_case_generic_err(); break;
} /* !lon_typ */
} /* !lon */
if(lon_est == NC_MAX_DOUBLE){
/* Precomputed longitude grids end with longitude 360.0 or 180.0 degrees */
switch(lon_typ){
case nco_grd_lon_bb:
case nco_grd_lon_Grn_ctr:
case nco_grd_lon_Grn_wst:
lon_est=360.0;
break;
case nco_grd_lon_180_ctr:
case nco_grd_lon_180_wst:
lon_est=180.0;
break;
default:
nco_dfl_case_generic_err(); break;
} /* !lon_typ */
} /* !lon */
/* Determine longitude increment from span of pre-centered bounding box (centering will not change span) */
lon_spn=lon_est-lon_wst;
lon_ncr=lon_spn/lon_nbr;
/* Centering: If user did not set explicit longitude bounds then... */
if(lon_typ != nco_grd_lon_bb)
/* map_lon_ctr_typ determines whether lon_wst refers to cell center or Western edge */
if((lon_typ == nco_grd_lon_Grn_ctr) || (lon_typ == nco_grd_lon_180_ctr)) lon_wst=lon_wst-(lon_ncr/2.0);
/* Re-derive lon_est from lon_wst and lon_nbr (more fundamental properties) */
lon_est=lon_wst+lon_ncr*lon_nbr;
/* lon_wst and lon_est have been set and will not change */
assert(lon_wst < lon_est);
lon_ntf[0L]=lon_wst;
lon_ntf[lon_nbr]=lon_est;
for(lon_idx=1L;lon_idx<lon_nbr;lon_idx++)
lon_ntf[lon_idx]=lon_ntf[0L]+lon_idx*lon_ncr;
/* Ensure rounding errors do not produce unphysical grid */
lon_ntf[lon_nbr]=lon_ntf[0L]+lon_spn;
/* Finished with longitude, now tackle latitude */
/* Were south/north latitude bounds set explicitly or implicitly? */
// if(lat_sth != NC_MAX_DOUBLE || lat_nrt != NC_MAX_DOUBLE) lon_typ=rgr->lat_typ=nco_grd_lat_bb;
if(lat_sth == NC_MAX_DOUBLE) lat_sth=-90.0;
if(lat_nrt == NC_MAX_DOUBLE) lat_nrt=90.0;
/* Determine latitude increment from span of pre-centered bounding box (centering will not change span) */
lat_spn=lat_nrt-lat_sth;
lat_ncr=lat_spn/lat_nbr;
const long lat_nbr_hlf=lat_nbr/2L; // [nbr] Half number of latitudes (e.g., lat_nbr_hlf=32 for lat_nbr=64 and 65)
double *lat_sin=NULL; // [frc] Sine of Gaussian latitudes double precision
/* Create S->N grid. If user requested N->S, flip grid at end */
// if(flg_s2n) lat_ntf[0L]=lat_sth; else lat_ntf[0L]=lat_nrt;
lat_ntf[0L]=lat_sth;
switch(lat_typ){
case nco_grd_lat_fv:
lat_ncr=lat_spn/(lat_nbr-1L);
lat_ntf[1L]=lat_ntf[0L]+0.5*lat_ncr;
for(lat_idx=2L;lat_idx<lat_nbr;lat_idx++)
lat_ntf[lat_idx]=lat_ntf[1L]+(lat_idx-1L)*lat_ncr;
break;
case nco_grd_lat_eqa:
lat_ncr=lat_spn/lat_nbr;
for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++)
lat_ntf[lat_idx]=lat_ntf[0L]+lat_idx*lat_ncr;
break;
case nco_grd_lat_gss:
lat_sin=(double *)nco_malloc(lat_nbr*sizeof(double));
(void)nco_lat_wgt_gss(lat_nbr,True,lat_sin,wgt_Gss);
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++)
lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]);
/* First guess for lat_ntf is midway between Gaussian abscissae */
for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++)
lat_ntf[lat_idx]=0.5*(lat_ctr[lat_idx-1L]+lat_ctr[lat_idx]);
/* Iterate guess until area between interfaces matches Gaussian weight (compute for one hemisphere, make other symmetric) */
for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++){
double fofx_at_x0; /* [frc] Function to iterate evaluated at current guess */
double dfdx_at_x0; /* [frc] Derivative of equation evaluated at current guess */
const double eps_rlt_cnv=1.0e-15; // Convergence criterion (1.0e-16 pushes double precision to the brink)
itr_cnt=0;
lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L]));
fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss;
while(fabs(fofx_at_x0) > eps_rlt_cnv){
/* Newton-Raphson iteration:
Let x=lat_ntf[lat_idx], y0=lat_ntf[lat_idx-1L], gw = Gaussian weight (exact solution)
f(x)=sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 # s2n grid
f(x)=sin(dgr2rdn*y0)-sin(dgr2rdn*x)-gw=0 # n2s grid
dfdx(x)= dgr2rdn*cos(dgr2rdn*x) # s2n grid
dfdx(x)=-dgr2rdn*cos(dgr2rdn*x) # n2s grid
x_better=x0-f(x0)/f'(x0) */
dfdx_at_x0=dgr2rdn*cos(dgr2rdn*lat_ntf[lat_idx]);
/* 20190613: n2s latitudes are constructed s2n and flipped to n2s later
Hence next line is commented-out in construction mode but used in infer mode */
// if(!flg_s2n) dfdx_at_x0=-dfdx_at_x0;
lat_ntf[lat_idx]+=fofx_at_x0/dfdx_at_x0; /* NB: not sure why this is minus not plus but it works :) */
lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L]));
fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss;
if(++itr_cnt > itr_nbr_max){
(void)fprintf(stdout,"%s: ERROR %s reports convergence only %g after %d iterations for lat_idx = %ld\n",nco_prg_nm_get(),fnc_nm,fabs(fofx_at_x0),itr_nbr_max,lat_idx);
nco_exit(EXIT_FAILURE);
} /* endif */
} /* !while */
} /* !lat_idx */
/* Use Gaussian grid symmetry to obtain same interfaces in both hemispheres (avoids cumulative rounding errors) */
if(lat_nbr%2){
/* lat_nbr is odd */
for(lat_idx=1L;lat_idx<=lat_nbr_hlf+1L;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx+1L];
}else{
/* lat_nbr is even */
for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx];
} /* !flg_lat_evn */
break;
default:
nco_dfl_case_generic_err(); break;
} /* !lat_typ */
/* Ensure rounding errors do not produce unphysical grid */
lat_ntf[lat_nbr]=lat_nrt;
if(nco_dbg_lvl_get() > nco_dbg_old){
(void)fprintf(stderr,"%s: DEBUG %s Gaussian abscissae/interfaces for lat_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,lat_nbr);
(void)fprintf(stderr,"idx\tlat_ctr\tlat_ntf\tntf_p1\n");
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){
(void)fprintf(stderr,"%ld\t%20.15f\t%20.15f\t%20.15f\n",lat_idx,lat_ctr[lat_idx],lat_ntf[lat_idx],lat_ntf[lat_idx+1L]);
} /* !lat_idx */
} /* !dbg */
/* Always define longitude centers midway between interfaces */
for(lon_idx=0L;lon_idx<=lon_nbr-1L;lon_idx++)
lon_ctr[lon_idx]=0.5*(lon_ntf[lon_idx]+lon_ntf[lon_idx+1L]);
/* Many grids have center latitude equally spaced between interfaces */
if(lat_typ != nco_grd_lat_fv && lat_typ != nco_grd_lat_gss){
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++)
lat_ctr[lat_idx]=0.5*(lat_ntf[lat_idx]+lat_ntf[lat_idx+1L]);
} /* !lat_typ */
/* Cap grids excepted---they place centers of first/last gridcells at poles */
if(lat_typ == nco_grd_lat_fv){
lat_ctr[0L]=lat_ntf[0L];
for(lat_idx=1L;lat_idx<lat_nbr-1L;lat_idx++)
lat_ctr[lat_idx]=0.5*(lat_ntf[lat_idx]+lat_ntf[lat_idx+1L]);
lat_ctr[lat_nbr-1L]=lat_ntf[lat_nbr];
} /* !cap */
/* Gaussian grid centerpoints are defined by solutions to Legendre polynomials */
if(lat_typ == nco_grd_lat_gss){
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++)
lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]);
} /* !Gaussian */
for(idx=0L;idx<lon_nbr;idx++){
lon_bnd[2*idx]=lon_ntf[idx];
lon_bnd[2*idx+1L]=lon_ntf[idx+1L];
} /* !idx */
for(idx=0L;idx<lat_nbr;idx++){
lat_bnd[2*idx]=lat_ntf[idx];
lat_bnd[2*idx+1L]=lat_ntf[idx+1L];
} /* !idx */
if(nco_dbg_lvl_get() >= nco_dbg_crr){
for(idx=0L;idx<lat_nbr;idx++){
(void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr[idx]);
for(int bnd_idx=0L;bnd_idx<bnd_nbr;bnd_idx++)
(void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? "]\n" : ", ");
} /* end loop over lat */
} /* endif dbg */
/* Use centers and boundaries to diagnose latitude weights */
switch(lat_typ){
case nco_grd_lat_eqa:
case nco_grd_lat_fv:
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx]));
break;
case nco_grd_lat_gss:
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=wgt_Gss[lat_idx];
break;
default:
nco_dfl_case_generic_err(); break;
} /* !lat_typ */
/* Fuzzy test of latitude weight normalization
     20180903 Tolerance threshold of eps_rlt_max=1.0e-14 is too strict for Gaussian grids once lat_nbr >~ 150
20180904 Tolerance threshold of eps_rlt_max=1.0e-12 allows Gaussian grids like ECMWF O1280
Newton-Raphson method of interface determination may need improvement to fix that
Tolerance threshold of 1.0e-14 works for all relevant E3SM Uniform and Cap grids */
//const double eps_rlt_max=1.0e-14; /* [frc] Round-off error tolerance: Used 1.0e-14 until 20180904 */
const double eps_rlt_max=1.0e-12; /* [frc] Round-off error tolerance: Used 1.0e-12 since 20180904 */
lat_wgt_ttl=0.0;
for(idx=0L;idx<lat_nbr;idx++) lat_wgt_ttl+=lat_wgt[idx];
if(grd_typ == nco_grd_2D_fv || grd_typ == nco_grd_2D_eqa){
double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */
lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd[2*(lat_nbr-1)+1L])-sin(dgr2rdn*lat_bnd[0L]));
if(fabs(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc) > eps_rlt_max){
(void)fprintf(stdout,"%s: ERROR %s reports grid normalization does not meet precision tolerance eps_rlt_max = %20.15f\nlat_wgt_ttl = %20.15f, lat_wgt_ttl_xpc = %20.15f, lat_wgt_frc = %20.15f, eps_rlt = %20.15f\n",nco_prg_nm_get(),fnc_nm,eps_rlt_max,lat_wgt_ttl,lat_wgt_ttl_xpc,lat_wgt_ttl/lat_wgt_ttl_xpc,1.0-lat_wgt_ttl/lat_wgt_ttl_xpc);
nco_exit(EXIT_FAILURE);
} /* !imprecise */
} /* !nco_grd_lat_eqa, !nco_grd_lat_fv */
/* 20180831 Code above assumes grids run S->N
User can request N->S grids with --rgr lat_drc=n2s
If so, flip grid before unrolling into output arrays */
if(!flg_s2n){
double *lat_ctr_tmp=NULL_CEWI; /* [dgr] Temporary Latitude centers of rectangular grid */
double *lat_wgt_tmp=NULL; /* [dgr] Temporary Latitude weights of rectangular grid */
double *lat_ntf_tmp=NULL; /* [dgr] Temporary Latitude interfaces of rectangular grid */
lat_ctr_tmp=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
lat_ntf_tmp=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ));
lat_wgt_tmp=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
long tmp_idx; /* [idx] Temporary index for swapping values */
for(idx=0L;idx<lat_nbr;idx++){
lat_ctr_tmp[idx]=lat_ctr[idx];
lat_wgt_tmp[idx]=lat_wgt[idx];
} /* !idx */
for(idx=0L;idx<lat_nbr;idx++){
tmp_idx=lat_nbr-idx-1L;
lat_ctr[idx]=lat_ctr_tmp[tmp_idx];
lat_wgt[idx]=lat_wgt_tmp[tmp_idx];
} /* !idx */
for(idx=0L;idx<lat_nbr+1L;idx++){
lat_ntf_tmp[idx]=lat_ntf[idx];
} /* !idx */
for(idx=0L;idx<lat_nbr+1L;idx++){
tmp_idx=lat_nbr+1L-idx-1L; /* NB: Subtle index difference */
lat_ntf[idx]=lat_ntf_tmp[tmp_idx];
} /* !idx */
for(idx=0L;idx<lat_nbr;idx++){
lat_bnd[2*idx]=lat_ntf[idx];
lat_bnd[2*idx+1L]=lat_ntf[idx+1L];
} /* !idx */
if(lat_ctr_tmp) lat_ctr_tmp=(double *)nco_free(lat_ctr_tmp);
if(lat_ntf_tmp) lat_ntf_tmp=(double *)nco_free(lat_ntf_tmp);
if(lat_wgt_tmp) lat_wgt_tmp=(double *)nco_free(lat_wgt_tmp);
} /* !flg_s2n */
assert(grd_crn_nbr == 4);
for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){
idx=grd_crn_nbr*lon_idx;
lon_crn[idx]=lon_ntf[lon_idx];
lon_crn[idx+1L]=lon_ntf[lon_idx+1L];
lon_crn[idx+2L]=lon_ntf[lon_idx+1L];
lon_crn[idx+3L]=lon_ntf[lon_idx];
} /* !lon_idx */
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){
idx=grd_crn_nbr*lat_idx;
lat_crn[idx]=lat_ntf[lat_idx];
lat_crn[idx+1L]=lat_ntf[lat_idx];
lat_crn[idx+2L]=lat_ntf[lat_idx+1L];
lat_crn[idx+3L]=lat_ntf[lat_idx+1L];
} /* !lat_idx */
/* Stuff rectangular arrays into unrolled arrays */
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){
for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){
idx=lat_idx*lon_nbr+lon_idx;
grd_ctr_lat[idx]=lat_ctr[lat_idx];
grd_ctr_lon[idx]=lon_ctr[lon_idx];
for(crn_idx=0L;crn_idx<grd_crn_nbr;crn_idx++){
idx2=grd_crn_nbr*idx+crn_idx;
lat_idx2=lat_idx*grd_crn_nbr+crn_idx;
lon_idx2=lon_idx*grd_crn_nbr+crn_idx;
grd_crn_lat[idx2]=lat_crn[lat_idx2];
grd_crn_lon[idx2]=lon_crn[lon_idx2];
} /* !crn */
} /* !lon */
} /* !lat */
if(flg_grd_crv){
/* Impose curvilinearity by adding lon_crv offset to each row relative to previous row, and lat_crv offset to each column relative to previous column */
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){
for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){
idx=lat_idx*lon_nbr+lon_idx;
grd_ctr_lat[idx]+=lon_idx*lat_crv;
grd_ctr_lon[idx]+=lat_idx*lon_crv;
for(crn_idx=0L;crn_idx<grd_crn_nbr;crn_idx++){
idx2=grd_crn_nbr*idx+crn_idx;
lat_idx2=lat_idx*grd_crn_nbr+crn_idx;
lon_idx2=lon_idx*grd_crn_nbr+crn_idx;
grd_crn_lat[idx2]=lat_crn[lat_idx2];
grd_crn_lon[idx2]=lon_crn[lon_idx2];
if(crn_idx == 0L || crn_idx == 1L){
grd_crn_lat[idx2]+=lat_idx*lat_crv; /* LL, LR */
grd_crn_lon[idx2]+=lat_idx*lon_crv; /* LL, LR */
}else if(crn_idx == 2L || crn_idx == 3L){
grd_crn_lat[idx2]+=(lat_idx+1L)*lat_crv; /* UL, UR */
grd_crn_lon[idx2]+=(lat_idx+1L)*lon_crv; /* UL, UR */
} /* !crn */
} /* !crn */
} /* !lon */
} /* !lat */
} /* !flg_grd_crv */
/* 20190613: Convert CW quadrilaterals to CCW quadrilaterals so TempestRemap accepts grids
Default construction/inferral method orders corners CCW and CW for s2n and n2s grids, respectively */
if(!flg_s2n){
nco_bool flg_ccw; /* [flg] Gridcell is CCW */
const int rcr_lvl=1; /* [nbr] Recursion level (1 is top level, 2 and greater are recursed */
const int idx_ccw=0; /* [idx] Index of starting vertice for CCW check (Point A = tail side AB) */
for(idx=0L;idx<grd_sz_nbr;idx++){
idx2=grd_crn_nbr*idx;
flg_ccw=nco_ccw_chk(grd_crn_lat+idx2,grd_crn_lon+idx2,grd_crn_nbr,idx_ccw,rcr_lvl);
if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_vec) (void)fprintf(stderr,"%s: DEBUG %s reports nco_ccw_chk() tried to change idx = %lu from CW to CCW\n",nco_prg_nm_get(),fnc_nm,idx);
} /* !idx */
} /* !flg_s2n */
if(nco_dbg_lvl_get() >= nco_dbg_std){
long int idx_crn_ll;
long int idx_crn_lr;
long int idx_crn_ur;
long int idx_crn_ul;
long idx_dbg;
idx_dbg=rgr->idx_dbg;
idx_crn_ll=grd_crn_nbr*idx_dbg+0L;
idx_crn_lr=grd_crn_nbr*idx_dbg+1L;
idx_crn_ur=grd_crn_nbr*idx_dbg+2L;
idx_crn_ul=grd_crn_nbr*idx_dbg+3L;
(void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,grd_ctr_lat[idx_dbg],grd_ctr_lon[idx_dbg],grd_crn_lat[idx_crn_ll],grd_crn_lon[idx_crn_ll],grd_crn_lat[idx_crn_lr],grd_crn_lon[idx_crn_lr],grd_crn_lat[idx_crn_ur],grd_crn_lon[idx_crn_ur],grd_crn_lat[idx_crn_ul],grd_crn_lon[idx_crn_ul]);
} /* !dbg */
if(flg_grd_crv){
/* Area of arbitrary curvilinear grids requires spherical trigonometry */
nco_sph_plg_area(rgr,grd_crn_lat,grd_crn_lon,grd_sz_nbr,grd_crn_nbr,area);
}else{
/* Area of rectangular spherical zones from elementary calculus results
20150906: Half-angle formulae for better conditioning improve area normalization for 801x1600 by 2.0e-15
area[lat_idx*lon_nbr+lon_idx]=dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*2.0*(sin(0.5*dgr2rdn*lat_bnd[2*lat_idx+1L])*cos(0.5*dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(0.5*dgr2rdn*lat_bnd[2*lat_idx])*cos(0.5*dgr2rdn*lat_bnd[2*lat_idx]));
Gain not worth the extra complexity */
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++)
for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++)
/* fabs() ensures positive area in n2s grids */
area[lat_idx*lon_nbr+lon_idx]=fabs(dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*(sin(dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(dgr2rdn*lat_bnd[2*lat_idx])));
} /* !flg_grd_2D */
if(nco_dbg_lvl_get() >= nco_dbg_sbr){
lat_wgt_ttl=0.0;
area_ttl=0.0;
if(flg_grd_2D){
(void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm);
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++)
lat_wgt_ttl+=lat_wgt[lat_idx];
} /* !flg_grd_2D */
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++)
for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++)
area_ttl+=area[lat_idx*lon_nbr+lon_idx];
(void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_ttl,area_ttl/(4.0*M_PI));
assert(area_ttl > 0.0);
assert(area_ttl <= 4.0*M_PI);
} /* endif dbg */
/* Open grid file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id);
/* Define dimensions */
rcd=nco_def_dim(out_id,grd_crn_nm,grd_crn_nbr,&dmn_id_grd_crn);
rcd=nco_def_dim(out_id,grd_sz_nm,grd_sz_nbr,&dmn_id_grd_sz);
rcd=nco_def_dim(out_id,grd_rnk_nm,grd_rnk_nbr,&dmn_id_grd_rnk);
int shuffle; /* [flg] Turn-on shuffle filter */
int deflate; /* [flg] Turn-on deflate filter */
deflate=(int)True;
shuffle=NC_SHUFFLE;
/* Define variables */
(void)nco_def_var(out_id,dmn_sz_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_rnk,&dmn_sz_int_id); /* NB: Too small to deflate */
(void)nco_def_var(out_id,grd_area_nm,(nc_type)crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&area_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_sz,&msk_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,grd_ctr_lat_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lat_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lat_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,grd_ctr_lon_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lon_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lon_id,shuffle,deflate,dfl_lvl);
dmn_ids[0]=dmn_id_grd_sz;
dmn_ids[1]=dmn_id_grd_crn;
(void)nco_def_var(out_id,grd_crn_lat_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lat_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lat_id,shuffle,deflate,dfl_lvl);
dmn_ids[0]=dmn_id_grd_sz;
dmn_ids[1]=dmn_id_grd_crn;
(void)nco_def_var(out_id,grd_crn_lon_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lon_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lon_id,shuffle,deflate,dfl_lvl);
/* Define global and "units" attributes */
char *att_val;
rcd=nco_char_att_put(out_id,NULL,"title",rgr->grd_ttl);
rcd=nco_char_att_put(out_id,NULL,"Conventions","SCRIP");
const char usr_cpp[]=TKN2SNG(USER); /* [sng] Hostname from C pre-processor */
rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp);
rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO");
(void)nco_hst_att_cat(out_id,rgr->cmd_ln);
(void)nco_vrs_att_cat(out_id);
rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ));
rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ));
rcd=nco_char_att_put(out_id,dmn_sz_nm,"long_name","Size(s) of horizontal dimensions (in Fortran storage order for historical reasons)");
rcd=nco_char_att_put(out_id,grd_area_nm,"long_name","Solid Angle Subtended on Source Grid");
rcd=nco_char_att_put(out_id,grd_area_nm,"standard_name","solid_angle");
rcd=nco_char_att_put(out_id,grd_area_nm,"units","steradian");
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"long_name","Latitude of Grid Cell Centers");
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"standard_name","latitude");
if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"bounds",grd_crn_lat_nm);
rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"long_name","Longitude of Grid Cell Centers");
rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"standard_name","longitude");
if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"bounds",grd_crn_lon_nm);
rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"long_name","Latitude of Grid Cell Vertices");
rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"standard_name","latitude");
if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"long_name","Longitude of Grid Cell Vertices");
rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"standard_name","longitude");
if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
rcd=nco_char_att_put(out_id,msk_nm,"long_name","Binary Integer Mask for Grid");
rcd=nco_char_att_put(out_id,msk_nm,"units","none");
/* Begin data mode */
(void)nco_enddef(out_id);
/* Write variables */
dmn_srt[0]=0L;
dmn_cnt[0]=grd_rnk_nbr;
rcd=nco_put_vara(out_id,dmn_sz_int_id,dmn_srt,dmn_cnt,dmn_sz_int,(nc_type)NC_INT);
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_nbr;
rcd=nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_nbr;
rcd=nco_put_vara(out_id,msk_id,dmn_srt,dmn_cnt,msk,(nc_type)NC_INT);
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_nbr;
rcd=nco_put_vara(out_id,grd_ctr_lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_nbr;
rcd=nco_put_vara(out_id,grd_ctr_lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ);
dmn_srt[0]=0L;
dmn_srt[1]=0L;
dmn_cnt[0]=grd_sz_nbr;
dmn_cnt[1]=grd_crn_nbr;
rcd=nco_put_vara(out_id,grd_crn_lat_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ);
dmn_srt[0]=0L;
dmn_srt[1]=0L;
dmn_cnt[0]=grd_sz_nbr;
dmn_cnt[1]=grd_crn_nbr;
rcd=nco_put_vara(out_id,grd_crn_lon_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ);
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
fl_out=rgr->fl_skl;
if(fl_out){
/* Write skeleton data file on requested grid
Skeleton file can then be populated with data for testing */
char *area_nm;
char *bnd_nm;
// char *bnd_tm_nm;
char *col_nm_out;
char *lat_nm_out; /* [sng] Name of output dimension for latitude */
char *lat_wgt_nm;
char *lon_nm_out; /* [sng] Name of variable to recognize as longitude */
char *lat_bnd_nm; /* [sng] Name of latitude boundary variable */
char *lon_bnd_nm; /* [sng] Name of longitude boundary variable */
// int area_id; /* [id] Variable ID for area */
int dmn_id_bnd; /* [id] Dimension ID */
//int dmn_id_bnd_tm; /* [id] Dimension ID */
int dmn_id_col; /* [id] Dimension ID */
int dmn_id_lat; /* [id] Dimension ID */
int dmn_id_lon; /* [id] Dimension ID */
int lat_bnd_id; /* [id] Variable ID for lat_bnds/lat_vertices */
int lat_id; /* [id] Variable ID for latitude */
int lat_wgt_id; /* [id] Variable ID for latitude weight */
int lon_bnd_id; /* [id] Variable ID for lon_bnds/lon_vertices */
int lon_id; /* [id] Variable ID for longitude */
/* Use explicitly specified output names, if any, otherwise use input names (either explicitly specified or discovered by fuzzing) */
if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=(char *)strdup("lat");
if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=(char *)strdup("lon");
if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=(char *)strdup("ncol");
/* Name output dimensions */
area_nm=rgr->area_nm;
bnd_nm=rgr->bnd_nm;
//bnd_tm_nm=rgr->bnd_tm_nm;
lat_bnd_nm=rgr->lat_bnd_nm;
lat_wgt_nm=rgr->lat_wgt_nm;
lon_bnd_nm=rgr->lon_bnd_nm;
/* Use names discovered by fuzzing */
if(flg_grd_1D){
bnd_nm=rgr->vrt_nm;
lat_bnd_nm=rgr->lat_vrt_nm;
lon_bnd_nm=rgr->lon_vrt_nm;
} /* !flg_grd_1D */
if(flg_grd_2D){
bnd_nm=rgr->bnd_nm;
lat_bnd_nm=rgr->lat_bnd_nm;
lon_bnd_nm=rgr->lon_bnd_nm;
} /* !flg_grd_2D */
/* Open grid file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id);
/* Define dimensions */
if(flg_grd_crv){
rcd=nco_def_dim(out_id,bnd_nm,grd_crn_nbr,&dmn_id_bnd);
}else{
rcd=nco_def_dim(out_id,bnd_nm,bnd_nbr,&dmn_id_bnd);
} /* !flg_grd_crv */
if(flg_grd_1D){
rcd=nco_def_dim(out_id,col_nm_out,col_nbr,&dmn_id_col);
} /* !flg_grd_1D */
if(flg_grd_2D){
rcd=nco_def_dim(out_id,lat_nm_out,lat_nbr,&dmn_id_lat);
rcd=nco_def_dim(out_id,lon_nm_out,lon_nbr,&dmn_id_lon);
} /* !flg_grd_2D */
/* Define new coordinates and variables in regridded file */
if(flg_grd_1D){
(void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_col,&lat_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_col,&lon_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl);
dmn_ids[0]=dmn_id_col;
dmn_ids[1]=dmn_id_bnd;
(void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lat_bnd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl);
dmn_ids[0]=dmn_id_col;
dmn_ids[1]=dmn_id_bnd;
(void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lon_bnd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_1D,&dmn_id_col,&area_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl);
} /* !flg_grd_1D */
if(flg_grd_crv){
dmn_ids[0]=dmn_id_lat;
dmn_ids[1]=dmn_id_lon;
(void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_2D,dmn_ids,&lat_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_2D,dmn_ids,&lon_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_2D,dmn_ids,&area_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl);
dmn_ids[0]=dmn_id_lat;
dmn_ids[1]=dmn_id_lon;
dmn_ids[2]=dmn_id_bnd;
(void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_3D,dmn_ids,&lat_bnd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_3D,dmn_ids,&lon_bnd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl);
}else if(flg_grd_2D){
(void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_lat,&lat_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_lon,&lon_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl);
dmn_ids[0]=dmn_id_lat;
dmn_ids[1]=dmn_id_bnd;
(void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lat_bnd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl);
dmn_ids[0]=dmn_id_lon;
dmn_ids[1]=dmn_id_bnd;
(void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lon_bnd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,lat_wgt_nm,crd_typ,dmn_nbr_1D,&dmn_id_lat,&lat_wgt_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_wgt_id,shuffle,deflate,dfl_lvl);
dmn_ids[0]=dmn_id_lat;
dmn_ids[1]=dmn_id_lon;
(void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_2D,dmn_ids,&area_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl);
} /* !flg_grd_2D */
/* Define attributes */
rcd=nco_char_att_put(out_id,NULL,"title",rgr->grd_ttl);
rcd=nco_char_att_put(out_id,NULL,"Conventions","CF-1.6");
rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp);
(void)nco_hst_att_cat(out_id,rgr->cmd_ln);
(void)nco_vrs_att_cat(out_id);
rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ));
rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ));
rcd=nco_char_att_put(out_id,area_nm,"long_name","Solid angle subtended by gridcell");
rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle");
rcd=nco_char_att_put(out_id,area_nm,"units","steradian");
char *crd_val_sng; /* CF-standard coordinates values string */
size_t crd_val_sng_lng=strlen(lat_nm_out)+strlen(lon_nm_out)+1L;
crd_val_sng=(char *)nco_malloc(crd_val_sng_lng*sizeof(char)+1L);
(void)sprintf(crd_val_sng,"%s %s",lat_nm_out,lon_nm_out);
rcd=nco_char_att_put(out_id,area_nm,"coordinates",crd_val_sng);
if(crd_val_sng) crd_val_sng=(char *)nco_free(crd_val_sng);
rcd=nco_char_att_put(out_id,lat_nm_out,"long_name","Latitude of Grid Cell Centers");
rcd=nco_char_att_put(out_id,lat_nm_out,"standard_name","latitude");
rcd=nco_char_att_put(out_id,lat_nm_out,"units","degrees_north");
rcd=nco_char_att_put(out_id,lat_nm_out,"axis","Y");
rcd=nco_char_att_put(out_id,lat_nm_out,"bounds",lat_bnd_nm);
if(flg_grd_2D) att_val=strdup("Gridcell latitude interfaces"); else att_val=strdup("Gridcell latitude vertices");
rcd=nco_char_att_put(out_id,lat_bnd_nm,"long_name",att_val);
if(flg_grd_2D) rcd=nco_char_att_put(out_id,lat_wgt_nm,"long_name","Latitude quadrature weights (normalized to sum to 2.0 on global grids)");
rcd=nco_char_att_put(out_id,lon_nm_out,"long_name","Longitude of Grid Cell Centers");
rcd=nco_char_att_put(out_id,lon_nm_out,"standard_name","longitude");
rcd=nco_char_att_put(out_id,lon_nm_out,"units","degrees_east");
rcd=nco_char_att_put(out_id,lon_nm_out,"axis","X");
rcd=nco_char_att_put(out_id,lon_nm_out,"bounds",lon_bnd_nm);
if(flg_grd_2D) att_val=strdup("Gridcell longitude interfaces"); else att_val=strdup("Gridcell longitude vertices");
rcd=nco_char_att_put(out_id,lon_bnd_nm,"long_name",att_val);
/* Begin data mode */
(void)nco_enddef(out_id);
/* Write new coordinates and variables to regridded file */
if(flg_grd_1D){
dmn_srt[0]=0L;
dmn_cnt[0]=col_nbr;
(void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=col_nbr;
(void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=col_nbr;
dmn_cnt[1]=bnd_nbr;
(void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=col_nbr;
dmn_cnt[1]=bnd_nbr;
(void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=col_nbr;
(void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
} /* !flg_grd_1D */
if(flg_grd_crv){
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=lat_nbr;
dmn_cnt[1]=lon_nbr;
(void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ);
(void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ);
(void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
dmn_srt[0]=dmn_srt[1]=0L;dmn_srt[2]=0L;
dmn_cnt[0]=lat_nbr;
dmn_cnt[1]=lon_nbr;
dmn_cnt[2]=grd_crn_nbr;
(void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ);
(void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ);
}else if(flg_grd_2D){
dmn_srt[0]=0L;
dmn_cnt[0]=lat_nbr;
(void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=lon_nbr;
(void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=lat_nbr;
(void)nco_put_vara(out_id,lat_wgt_id,dmn_srt,dmn_cnt,lat_wgt,crd_typ);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=lat_nbr;
dmn_cnt[1]=bnd_nbr;
(void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=lon_nbr;
dmn_cnt[1]=bnd_nbr;
(void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=lat_nbr;
dmn_cnt[1]=lon_nbr;
(void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
} /* !flg_grd_2D */
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
} /* !fl_out */
/* Free memory associated with input file */
if(dmn_sz_int) dmn_sz_int=(int *)nco_free(dmn_sz_int);
if(msk) msk=(int *)nco_free(msk);
if(area) area=(double *)nco_free(area);
if(grd_ctr_lat) grd_ctr_lat=(double *)nco_free(grd_ctr_lat);
if(grd_ctr_lon) grd_ctr_lon=(double *)nco_free(grd_ctr_lon);
if(grd_crn_lat) grd_crn_lat=(double *)nco_free(grd_crn_lat);
if(grd_crn_lon) grd_crn_lon=(double *)nco_free(grd_crn_lon);
if(lat_bnd) lat_bnd=(double *)nco_free(lat_bnd);
if(lat_crn) lat_crn=(double *)nco_free(lat_crn);
if(lat_ctr) lat_ctr=(double *)nco_free(lat_ctr);
if(lat_ntf) lat_ntf=(double *)nco_free(lat_ntf);
if(lat_sin) lat_sin=(double *)nco_free(lat_sin);
if(lat_wgt) lat_wgt=(double *)nco_free(lat_wgt);
if(lon_bnd) lon_bnd=(double *)nco_free(lon_bnd);
if(lon_crn) lon_crn=(double *)nco_free(lon_crn);
if(lon_ctr) lon_ctr=(double *)nco_free(lon_ctr);
if(lon_ntf) lon_ntf=(double *)nco_free(lon_ntf);
if(wgt_Gss) wgt_Gss=(double *)nco_free(wgt_Gss);
return rcd;
} /* !nco_grd_mk() */
int /* O [enm] Return code */
nco_grd_nfr /* [fnc] Infer SCRIP-format grid file from input data file */
(rgr_sct * const rgr) /* I/O [sct] Regridding structure */
{
/* Purpose: Use grid information and guesswork to create SCRIP-format grid file from input data file
Test curvilinear grids:
ncks -O -D 1 --rgr infer --rgr grid=${DATA}/sld/rgr/grd_airs.nc ${DATA}/sld/raw/AIRS.2014.10.01.202.L2.TSurfStd.Regrid010.1DLatLon.nc ~/foo.nc
ncks -O -D 1 --rgr infer --rgr grid=${DATA}/sld/rgr/grd_airs.nc ${DATA}/sld/raw/AIRS.2014.10.01.202.L2.TSurfStd.Regrid010.1DLatLon.hole.nc ~/foo.nc */
const char fnc_nm[]="nco_grd_nfr()"; /* [sng] Function name */
const double rdn2dgr=180.0/M_PI;
const double dgr2rdn=M_PI/180.0;
const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables */
const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */
const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */
const int dmn_nbr_grd_max=4; /* [nbr] Maximum rank of grid variables (msk_[src/dst] could be rank 4) */
const int itr_nbr_max=20; // [nbr] Maximum number of iterations
const int idx_ccw=0; /* [idx] Index of starting vertex for CCW check (Point A = tail side AB) */
const int rcr_lvl=1; /* [nbr] Recursion level (1 is top level, 2 and greater are recursed) */
const nc_type crd_typ=NC_DOUBLE;
char *area_nm_in=NULL;
char *fl_in;
char *fl_out;
char *fl_out_tmp=NULL_CEWI;
char *fl_pth_lcl=NULL;
char *msk_nm_in=NULL;
char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */
/* SCRIP-format grid names are non-negotiable and thus fixed not dynamic */
char area_nm[]="grid_area"; /* 20150830: NB ESMF_RegridWeightGen --user_areas looks for variable named "grid_area" */
char dmn_sz_nm[]="grid_dims";
char grd_crn_lat_nm[]="grid_corner_lat";
char grd_crn_lon_nm[]="grid_corner_lon";
char grd_crn_nm[]="grid_corners";
char grd_ctr_lat_nm[]="grid_center_lat";
char grd_ctr_lon_nm[]="grid_center_lon";
char grd_rnk_nm[]="grid_rank";
char grd_sz_nm[]="grid_size";
char msk_nm[]="grid_imask";
char unt_sng[]="units"; /* netCDF-standard units attribute name */
double *grd_ctr_lat; /* [dgr] Latitude centers of grid */
double *grd_ctr_lon; /* [dgr] Longitude centers of grid */
double *grd_crn_lat; /* [dgr] Latitude corners of grid */
double *grd_crn_lon; /* [dgr] Longitude corners of grid */
double *area; /* [sr] Area of grid */
double *lat_bnd=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular grid */
double *lat_crn=NULL; /* [dgr] Latitude corners of rectangular grid */
double *lat_ctr=NULL_CEWI; /* [dgr] Latitude centers of rectangular grid */
double *lat_ntf=NULL; /* [dgr] Latitude interfaces of rectangular grid */
double *lat_wgt=NULL; /* [dgr] Latitude weights of rectangular grid */
double *lon_bnd=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular grid */
double *lon_crn=NULL; /* [dgr] Longitude corners of rectangular grid */
double *lon_ctr=NULL_CEWI; /* [dgr] Longitude centers of rectangular grid */
double *lon_ntf=NULL; /* [dgr] Longitude interfaces of rectangular grid */
double *vrt_lat=NULL; /* [rdn] MPAS latitude boundary variable latVertex */
double *vrt_lon=NULL; /* [rdn] MPAS longitude boundary variable lonVertex */
double area_ttl=0.0; /* [frc] Exact sum of area */
//double lat_nrt; /* [dgr] Latitude of northern edge of grid */
double lat_sth; /* [dgr] Latitude of southern edge of grid */
double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */
double lat_wgt_gss; /* [frc] Latitude weight estimated from interface latitudes */
// double lon_est; /* [dgr] Longitude of eastern edge of grid */
double lon_wst; /* [dgr] Longitude of western edge of grid */
double lon_ncr; /* [dgr] Longitude increment */
double lat_ncr; /* [dgr] Latitude increment */
double lon_spn; /* [dgr] Longitude span */
double lat_spn; /* [dgr] Latitude span */
double mss_val_area_dbl;
double mss_val_ctr_dbl;
double mss_val_msk_dbl;
int *msk=NULL; /* [flg] Mask of grid */
int *vrt_cll=NULL; /* [enm] MPAS variable verticesOnCell */
int *dmn_sz_int; /* [nbr] Array of dimension sizes of grid */
int dmn_ids[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int dmn_idx; /* [idx] Dimension index */
int fl_out_fmt=NC_FORMAT_CLASSIC; /* [enm] Output file format */
int in_id; /* I [id] Input netCDF file ID */
int md_open; /* [enm] Mode flag for nc_open() call */
int out_id; /* I [id] Output netCDF file ID */
int rcd=NC_NOERR;
int area_id=NC_MIN_INT; /* [id] Area variable ID */
int dmn_id_grd_crn; /* [id] Grid corners dimension ID */
int dmn_id_grd_rnk; /* [id] Grid rank dimension ID */
int dmn_id_grd_sz; /* [id] Grid size dimension ID */
int dmn_sz_int_id; /* [id] Grid dimension sizes ID */
int grd_crn_lat_id; /* [id] Grid corner latitudes variable ID */
int grd_crn_lon_id; /* [id] Grid corner longitudes variable ID */
int grd_ctr_lat_id; /* [id] Grid center latitudes variable ID */
int grd_ctr_lon_id; /* [id] Grid center longitudes variable ID */
int itr_cnt; /* Iteration counter */
int lat_rnk; /* [nbr] Rank of latitude coordinate */
int lon_rnk; /* [nbr] Rank of longitude coordinate */
int lat_ctr_id=NC_MIN_INT; /* [id] Latitude centers of rectangular grid variable ID */
int lon_ctr_id=NC_MIN_INT; /* [id] Longitude centers of rectangular grid variable ID */
int lat_bnd_id=NC_MIN_INT; /* [id] Latitude bounds of rectangular grid variable ID */
int lon_bnd_id=NC_MIN_INT; /* [id] Longitude bounds of rectangular grid variable ID */
int msk_id=NC_MIN_INT; /* [id] Mask variable ID */
int msk_rnk_nbr; /* [id] Mask rank */
int mss_val_int_out=NC_MIN_INT; /* [nbr] Value that can be non-erroneously pointed to */
int val_two=2; /* [nbr] Value that can be non-erroneously pointed to */
int val_zero=0; /* [nbr] Value that can be non-erroneously pointed to */
int var_id; /* [id] Current variable ID */
int vrt_cll_id=NC_MIN_INT; /* [id] MPAS variable verticesOnCell ID */
int vrt_lat_id=NC_MIN_INT; /* [id] MPAS latitude boundary variable latVertex ID */
int vrt_lon_id=NC_MIN_INT; /* [id] MPAS longitude boundary variable lonVertex ID */
long dmn_srt[dmn_nbr_grd_max];
long dmn_cnt[dmn_nbr_grd_max];
long bnd_idx;
long bnd_nbr=NC_MIN_INT; /* [nbr] Number of bounds in gridcell */
long col_idx;
long col_nbr; /* [nbr] Number of columns in grid */
long crn_idx; /* [idx] Counting index for corners */
long ttl_idx; /* [idx] Total (unrolled) counting index for grid+corners */
long dmn_sz; /* [nbr] Size of current dimension */
long grd_crn_nbr; /* [nbr] Number of corners in gridcell */
long grd_rnk_nbr=int_CEWI; /* [nbr] Number of dimensions in grid */
long grd_sz_nbr; /* [nbr] Number of gridcells in grid */
long idx2; /* [idx] Counting index for unrolled grids */
long idx; /* [idx] Counting index for unrolled grids */
long idx_crn;
long idx_ctr;
long idx_fst; /* [idx] Index offset */
long idx_tmp; /* [idx] Temporary index */
long lat_idx2; /* [idx] Counting index for unrolled latitude */
long lat_idx;
long lat_nbr; /* [nbr] Number of latitudes in grid */
long lon_idx2; /* [idx] Counting index for unrolled longitude */
long lon_idx;
long lon_nbr; /* [nbr] Number of longitudes in grid */
long vrt_idx; /* [idx] Counting index for vertices */
long vrt_nbr; /* [nbr] Number of vertices in MPAS grid */
long int idx_crn_ll;
long int idx_crn_lr;
long int idx_crn_ur;
long int idx_crn_ul;
nco_bool FL_RTR_RMT_LCN;
nco_bool FORCE_APPEND=False; /* Option A */
nco_bool FORCE_OVERWRITE=True; /* Option O */
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool SHARE_CREATE=rgr->flg_uio; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */
nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
nco_bool WRT_TMP_FL=False; /* [flg] Write output to temporary file */
nco_bool flg_1D_mpas_bnd=False; /* [flg] Unstructured input grid with MPAS bounds */
nco_bool flg_1D_psd_rct_bnd=False; /* [flg] Unstructured input grid with pseudo-rectangular bounds */
nco_bool flg_ccw; /* [flg] Gridcell is CCW */
nco_bool flg_grd_1D=False;
nco_bool flg_grd_2D=False;
nco_bool flg_grd_crv=False;
nco_bool flg_s2n=True; /* [enm] Latitude grid-direction is South-to-North */
nco_bool flg_wrt_crn=True;
nco_bool flg_crn_grd_lat_lon=False; /* [flg] Curvilinear corner array ordered non-canonically as grd_nbr,lat_nbr,lon_nbr */
nco_bool use_mss_val_area=False;
nco_bool has_mss_val_area=False;
nco_bool has_mss_val_bnd=False;
nco_bool has_mss_val_ctr=False;
nco_bool has_mss_val_msk=False;
nco_grd_2D_typ_enm grd_typ; /* [enm] Grid-type enum */
nco_grd_lat_typ_enm lat_typ; /* [enm] Latitude grid-type enum */
nco_grd_lon_typ_enm lon_typ; /* [enm] Longitude grid-type enum */
nco_grd_xtn_enm nco_grd_xtn=nco_grd_xtn_nil; /* [enm] Grid-extent enum */
nc_type msk_typ;
ptr_unn msk_unn;
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
/* Algorithm:
Read grid information from input data file (aka *_in)
Close input file
Once grid dimensions known, allocate output grid arrays (aka *_out)
Open output file (aka grid-file)
Use guesswork and standard algorithms to fill-in output arrays */
/* Duplicate (because nco_fl_mk_lcl() free()'s fl_in) */
fl_in=(char *)strdup(rgr->fl_in);
/* Make sure file is on local system and is readable or die trying */
fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
if(SHARE_OPEN) md_open=md_open|NC_SHARE;
rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id);
char *bnd_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as bounds */
char *col_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as column */
char *lat_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as latitude */
char *lon_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as longitude */
char *lat_nm_in=NULL_CEWI; /* [sng] Name of variable to recognize as latitude */
char *lon_nm_in=NULL_CEWI; /* [sng] Name of variable to recognize as longitude */
char *lat_bnd_nm=NULL_CEWI; /* [sng] Name of latitude boundary variable */
char *lon_bnd_nm=NULL_CEWI; /* [sng] Name of longitude boundary variable */
char *vrt_dmn_nm=NULL_CEWI; /* [sng] Name of MPAS vertices dimension nVertices */
char *vrt_cll_nm=NULL_CEWI; /* [sng] Name of MPAS variable verticesOnCell */
char *vrt_lat_nm=NULL_CEWI; /* [sng] Name of MPAS latitude boundary variable latVertex */
char *vrt_lon_nm=NULL_CEWI; /* [sng] Name of MPAS longitude boundary variable lonVertex */
int dmn_id_bnd=NC_MIN_INT; /* [id] Dimension ID for spatial bounds */
int dmn_id_col=NC_MIN_INT; /* [id] Dimension ID for unstructured grids */
int dmn_id_lat=NC_MIN_INT; /* [id] Dimension ID for latitude */
int dmn_id_lon=NC_MIN_INT; /* [id] Dimension ID for longitude */
int dmn_id_vrt=NC_MIN_INT; /* [id] Dimension ID for MPAS vertices */
/* Begin CF-coordinates block */
cf_crd_sct *cf=NULL;
char *rgr_var; /* [sng] Variable for special regridding treatment */
nco_bool flg_cf=False; /* [flg] Follow CF Coordinates convention to find and infer grid */
rgr_var=rgr->var_nm;
if(rgr_var){
/* Infer grid from special variable
Intended to be variable that has both horizontal dimensions and "coordinates" attribute, e.g.,
ncks --cdl -m ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc | grep coordinates
4LFTX_221_SPDY_S113:coordinates = "gridlat_221 gridlon_221" ;
Usage:
ncks -O -D 3 --rgr infer --rgr_var=4LFTX_221_SPDY_S113 --rgr grid=~/grd_narr.nc ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc ~/foo.nc */
char crd_sng[]="coordinates"; /* CF-standard coordinates attribute name */
cf=(cf_crd_sct *)nco_malloc(sizeof(cf_crd_sct));
cf->crd=False; /* [flg] CF coordinates information is complete */
cf->crd_id[0]=NC_MIN_INT; /* [id] Coordinate ID, first */
cf->crd_id[1]=NC_MIN_INT; /* [id] Coordinate ID, second */
cf->crd_nm[0]=NULL; /* [sng] Coordinate name, first */
cf->crd_nm[1]=NULL; /* [sng] Coordinate name, second */
cf->crd_sng=NULL; /* [sng] Coordinates attribute value */
cf->dmn_id[0]=NC_MIN_INT; /* [id] Dimension ID, first */
cf->dmn_id[1]=NC_MIN_INT; /* [id] Dimension ID, second */
cf->dmn_nm[0]=NULL; /* [sng] Dimension name, first */
cf->dmn_nm[1]=NULL; /* [sng] Dimension name, second */
cf->unt_sng[0]=NULL; /* [sng] Units string, first coordinate */
cf->unt_sng[1]=NULL; /* [sng] Units string, second coordinate */
cf->var_id=NC_MIN_INT; /* [id] Coordinate variable ID */
cf->var_nm=NULL; /* [sng] Coordinates variable name */
cf->var_type=NC_NAT; /* [enm] Coordinates variable type */
if((rcd=nco_inq_varid_flg(in_id,rgr_var,&cf->var_id)) != NC_NOERR){
(void)fprintf(stderr,"%s: WARNING %s reports special \"coordinates\" variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var);
goto skp_cf;
} /* !rcd */
cf->crd_sng=nco_char_att_get(in_id,cf->var_id,crd_sng);
if(cf->crd_sng){
cf->crd=True;
}else{ /* !rcd && att_typ */
(void)fprintf(stderr,"%s: WARNING %s reports coordinates variable %s does not have character-valued \"coordinates\" attribute. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var);
goto skp_cf;
} /* !rcd && att_typ */
/* Valid coordinates attribute requires two coordinate names separated by space character */
char *crd_nm[NCO_MAX_CRD_PER_VAR]; /* [sng] Coordinate name start position */
char *crd_dpl; /* [sng] Modifiable duplicate of coordinates string */
char *spc_ptr; /* [sng] Pointer to space character (' ') */
int crd_nbr=0; /* [nbr] Number of names in coordinates attribute */
int crd_spt=0; /* [nbr] Number of "spatial-like" (that include "degree" in units) coordinates */
int crd_idx=0; /* [idx] Counter for coordinate names */
for(crd_idx=0;crd_idx<NCO_MAX_CRD_PER_VAR;crd_idx++) crd_nm[crd_idx]=NULL;
crd_dpl=(char *)strdup(cf->crd_sng);
/* Search for spaces starting from end of string */
while((spc_ptr=strrchr(crd_dpl,' '))){
crd_nm[crd_nbr]=spc_ptr+1L;
crd_nbr++;
/* NUL-terminate so next search ends here */
*spc_ptr='\0';
} /* !sbs_ptr */
/* Final coordinate name begins where coordinate string starts */
crd_nm[crd_nbr]=crd_dpl;
/* Change crd_nbr from 0-based index to actual coordinate number */
crd_nbr++;
if(crd_nbr < 2){
(void)fprintf(stderr,"%s: WARNING %s found only %d coordinate(s) in \"coordinates\" attribute \"%s\", at least two are required. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,crd_nbr,cf->crd_sng);
goto skp_cf;
} /* !crd_nbr */
/* If more than two coordinate names are present, choose first two (searching backwards from end) with "degree" in units attributes, otherwise just choose first two */
crd_idx=crd_spt=0;
while(crd_spt < 2 && crd_idx < crd_nbr){
cf->crd_nm[crd_spt]=crd_nm[crd_idx];
if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[crd_spt],&cf->crd_id[crd_spt])) == NC_NOERR){
cf->unt_sng[crd_spt]=nco_char_att_get(in_id,cf->crd_id[crd_spt],unt_sng);
if(cf->unt_sng[crd_spt]){
if(strcasestr(cf->unt_sng[crd_spt],"degree")){
/* Increment count of spatial-like coordinates... */
crd_spt++;
}else{
/* ...or free() memory allocated during search */
cf->unt_sng[crd_spt]=(char *)nco_free(cf->unt_sng[crd_spt]);
} /* !strcasestr() */
crd_idx++;
} /* !rcd && att_typ */
} /* !rcd */
} /* !crd_spt */
/* If while()-loop above was successful, our search is over
Otherwise, use first two coordinate names regardless of units, and print more diagnostics */
if(crd_spt < 2){
cf->crd_nm[0]=crd_nm[0];
cf->crd_nm[1]=crd_nm[1];
if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[0],&cf->crd_id[0])) != NC_NOERR){
(void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0]);
goto skp_cf;
} /* !rcd */
if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[1],&cf->crd_id[1])) != NC_NOERR){
(void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1]);
goto skp_cf;
} /* !rcd */
cf->unt_sng[0]=nco_char_att_get(in_id,cf->crd_id[0],unt_sng);
if(cf->unt_sng[0]){
if(!strcasestr(cf->unt_sng[0],"degree")) (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->unt_sng[0]);
} /* !rcd && att_typ */
cf->unt_sng[1]=nco_char_att_get(in_id,cf->crd_id[1],unt_sng);
if(cf->unt_sng[1]){
if(!strcasestr(cf->unt_sng[1],"degree")) (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1],cf->unt_sng[1]);
} /* !rcd && att_typ */
} /* !crd_spt */
int crd_rnk; /* [nbr] Coordinate rank */
rcd=nco_inq_varndims(in_id,cf->crd_id[0],&crd_rnk);
if(crd_rnk != 2){
(void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s has %i dimension(s). Skipping CF coordinates method.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],crd_rnk);
goto skp_cf;
} /* !crd_rnk */
rcd=nco_inq_vardimid(in_id,cf->crd_id[0],cf->dmn_id);
cf->dmn_nm[0]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR));
cf->dmn_nm[1]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR));
rcd=nco_inq_dimname(in_id,cf->dmn_id[0],cf->dmn_nm[0]);
rcd=nco_inq_dimname(in_id,cf->dmn_id[1],cf->dmn_nm[1]);
/* "coordinates" convention does not guarantee lat, lon are specified in that order
Use "units" values, if any, to determine order
In absence of "units", assume order is lat, lon */
nco_bool crd0_is_lat=False; /* [flg] First coordinate is latitude */
nco_bool crd0_is_lon=False; /* [flg] First coordinate is longitude */
nco_bool crd1_is_lat=False; /* [flg] Second coordinate is latitude */
nco_bool crd1_is_lon=False; /* [flg] Second coordinate is longitude */
if(cf->unt_sng[0]){
if(!strcasecmp(cf->unt_sng[0],"degrees_north") || !strcasecmp(cf->unt_sng[0],"degree_north") || !strcasecmp(cf->unt_sng[0],"degree_N") || !strcasecmp(cf->unt_sng[0],"degrees_N") || !strcasecmp(cf->unt_sng[0],"degreeN") || !strcasecmp(cf->unt_sng[0],"degreesN")) crd0_is_lat=True;
if(!strcasecmp(cf->unt_sng[0],"degrees_east") || !strcasecmp(cf->unt_sng[0],"degree_east") || !strcasecmp(cf->unt_sng[0],"degree_E") || !strcasecmp(cf->unt_sng[0],"degrees_E") || !strcasecmp(cf->unt_sng[0],"degreeE") || !strcasecmp(cf->unt_sng[0],"degreesE")) crd0_is_lon=True;
} /* endif */
if(cf->unt_sng[1]){
if(!strcasecmp(cf->unt_sng[1],"degrees_north") || !strcasecmp(cf->unt_sng[1],"degree_north") || !strcasecmp(cf->unt_sng[1],"degree_N") || !strcasecmp(cf->unt_sng[1],"degrees_N") || !strcasecmp(cf->unt_sng[1],"degreeN") || !strcasecmp(cf->unt_sng[1],"degreesN")) crd1_is_lat=True;
if(!strcasecmp(cf->unt_sng[1],"degrees_east") || !strcasecmp(cf->unt_sng[1],"degree_east") || !strcasecmp(cf->unt_sng[1],"degree_E") || !strcasecmp(cf->unt_sng[1],"degrees_E") || !strcasecmp(cf->unt_sng[1],"degreeE") || !strcasecmp(cf->unt_sng[1],"degreesE")) crd1_is_lon=True;
} /* endif */
assert((crd0_is_lat && crd1_is_lon) || (crd0_is_lon && crd1_is_lat));
int idx_lat;
int idx_lon;
if(crd0_is_lat && crd1_is_lon){
idx_lat=0;
idx_lon=1;
}else{
idx_lat=1;
idx_lon=0;
} /* endif */
/* Dimensions and coordinates have been vetted. Store as primary lookup names.
Dimensions are always returned in order [LRV,MRV]=[0,1]
LRV is along-track direction, and MRV is across-track (at least in NASA data)
Internally we label LRV as "lat" and MRV as "lon" so that code looks similar for curvilinear and rectangular grids */
dmn_id_lat=cf->dmn_id[0];
dmn_id_lon=cf->dmn_id[1];
/* Subtlety: lat_nm_in is coordinate (variable+dimension) name when specified from command-line (as in nco_grd_nfr()), dimension name when found through CF-method (as in nco_rgr_wgt()). This confusing distinction could be avoided by passing command-line dimension names through-to nco_rgr_wgt(). However, that route would require complex priorities for what to do when passing command-line coordinate names not dimension names and visa-versa. */
//lat_nm_in=strdup(cf->dmn_nm[0]);
//lon_nm_in=strdup(cf->dmn_nm[1]);
lat_nm_in=strdup(cf->crd_nm[idx_lat]);
lon_nm_in=strdup(cf->crd_nm[idx_lon]);
/* Next four lines unnecessary in nco_rgr_wgt() which only needs dimension names (it reads input coordinates from map- not data-file) */
lat_ctr_id=cf->crd_id[idx_lat];
lon_ctr_id=cf->crd_id[idx_lon];
lat_dmn_nm=strdup(cf->dmn_nm[0]);
lon_dmn_nm=strdup(cf->dmn_nm[1]);
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s \"coordinates\" attribute \"%s\" points to coordinates %s and %s. Latitude coordinate \"%s\" has LRV (along-track) and MRV (across-track) dimensions \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,rgr_var,cf->crd_sng,cf->crd_nm[0],cf->crd_nm[1],cf->crd_nm[idx_lat],cf->dmn_nm[0],cf->dmn_nm[1]);
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s Coordinates %s and %s \"units\" values are \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->crd_nm[1],cf->unt_sng[0] ? cf->unt_sng[0] : "(non-existent)",cf->unt_sng[1] ? cf->unt_sng[1] : "(non-existent)");
/* Clean-up CF coordinates memory */
if(crd_dpl) crd_dpl=(char *)nco_free(crd_dpl);
if(cf->crd_sng) cf->crd_sng=(char *)nco_free(cf->crd_sng);
if(cf->dmn_nm[0]) cf->dmn_nm[0]=(char *)nco_free(cf->dmn_nm[0]);
if(cf->dmn_nm[1]) cf->dmn_nm[1]=(char *)nco_free(cf->dmn_nm[1]);
if(cf->unt_sng[0]) cf->unt_sng[0]=(char *)nco_free(cf->unt_sng[0]);
if(cf->unt_sng[1]) cf->unt_sng[1]=(char *)nco_free(cf->unt_sng[1]);
} /* !rgr_var */
/* goto skp_cf */
skp_cf:
/* free() any abandoned cf structure now */
if(!flg_cf)
if(cf) cf=(cf_crd_sct *)nco_free(cf);
rcd=NC_NOERR;
/* End CF-coordinates block */
/* Locate fields that must be present in input file
Required variables are usually latitude and longitude
Currently these variables must be in root group
This fails for, e.g., OMI L2 which has coordinates /GEOLOCATION_DATA/[Latitude,Longitude]
fxm: Generalize with traversal table so usual suspect coordinates may be in any group */
if(lat_ctr_id == NC_MIN_INT){
if(rgr->lat_nm_in && (rcd=nco_inq_varid_flg(in_id,rgr->lat_nm_in,&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup(rgr->lat_nm_in);
else if((rcd=nco_inq_varid_flg(in_id,"latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latitude");
else if((rcd=nco_inq_varid_flg(in_id,"Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("Latitude"); /* AMSR, HIRDLS, TRMM */
else if((rcd=nco_inq_varid_flg(in_id,"lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("lat"); /* CAM */
else if((rcd=nco_inq_varid_flg(in_id,"lat_d",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("lat_d"); /* EAM dynamics grid */
else if((rcd=nco_inq_varid_flg(in_id,"Lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("Lat");
else if((rcd=nco_inq_varid_flg(in_id,"XLAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("XLAT"); /* WRF */
else if((rcd=nco_inq_varid_flg(in_id,"XLAT_M",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("XLAT_M"); /* Unknown */
else if((rcd=nco_inq_varid_flg(in_id,"LAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("LAT"); /* MAR/RACMO */
else if((rcd=nco_inq_varid_flg(in_id,"LATIXY",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("LATIXY"); /* CISM/CLM/ELM */
else if((rcd=nco_inq_varid_flg(in_id,"TLAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("TLAT"); /* CICE, POP */
else if((rcd=nco_inq_varid_flg(in_id,"ULAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("ULAT"); /* CICE, POP */
else if((rcd=nco_inq_varid_flg(in_id,"latCell",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latCell"); /* MPAS-O/I */
else if((rcd=nco_inq_varid_flg(in_id,"nav_lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("nav_lat"); /* NEMO */
else if((rcd=nco_inq_varid_flg(in_id,"rlat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("rlat"); /* RACMO */
else if((rcd=nco_inq_varid_flg(in_id,"global_latitude0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("global_latitude0"); /* Oxford */
else if((rcd=nco_inq_varid_flg(in_id,"latitude0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latitude0"); /* Oxford NB: Must search for global_* first */
else if((rcd=nco_inq_varid_flg(in_id,"CO_Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("CO_Latitude"); /* MLS */
else if((rcd=nco_inq_varid_flg(in_id,"S1_Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("S1_Latitude"); /* GPM */
else if((rcd=nco_inq_varid_flg(in_id,"yc",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("yc"); /* RTM */
else if((rcd=nco_inq_varid_flg(in_id,"south_north",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("south_north"); /* StackOverflow question https://stackoverflow.com/questions/68896581 */
else if((rcd=nco_inq_varid_flg(in_id,"gridlat_0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("gridlat_0"); /* NWS HRRR */
} /* !lat_ctr_id */
if(lon_ctr_id == NC_MIN_INT){
  /* Search internal database of likely longitude-coordinate names
     NB: Order matters---more specific names must precede substrings of themselves
     (e.g., global_longitude0 before longitude0) */
  if(rgr->lon_nm_in && (rcd=nco_inq_varid_flg(in_id,rgr->lon_nm_in,&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup(rgr->lon_nm_in);
  else if((rcd=nco_inq_varid_flg(in_id,"longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("longitude");
  else if((rcd=nco_inq_varid_flg(in_id,"Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("Longitude"); /* AMSR, TRMM */
  else if((rcd=nco_inq_varid_flg(in_id,"lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lon"); /* CAM */
  else if((rcd=nco_inq_varid_flg(in_id,"lon_d",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lon_d"); /* EAM dynamics grid. Bugfix: previously strdup("lon"), which recorded the wrong name for the variable actually found (cf. parallel "lat_d" case in latitude search above) */
  else if((rcd=nco_inq_varid_flg(in_id,"Lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("Lon");
  else if((rcd=nco_inq_varid_flg(in_id,"XLONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("XLONG"); /* WRF */
  else if((rcd=nco_inq_varid_flg(in_id,"XLONG_M",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("XLONG_M"); /* Unknown */
  else if((rcd=nco_inq_varid_flg(in_id,"LON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("LON"); /* MAR/RACMO */
  else if((rcd=nco_inq_varid_flg(in_id,"LONGXY",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("LONGXY"); /* CISM/CLM/ELM */
  else if((rcd=nco_inq_varid_flg(in_id,"TLON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("TLON"); /* CICE */
  else if((rcd=nco_inq_varid_flg(in_id,"TLONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("TLONG"); /* POP */
  else if((rcd=nco_inq_varid_flg(in_id,"ULON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("ULON"); /* CICE */
  else if((rcd=nco_inq_varid_flg(in_id,"ULONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("ULONG"); /* POP */
  else if((rcd=nco_inq_varid_flg(in_id,"lonCell",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lonCell"); /* MPAS-O/I */
  else if((rcd=nco_inq_varid_flg(in_id,"nav_lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("nav_lon"); /* NEMO */
  else if((rcd=nco_inq_varid_flg(in_id,"rlon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("rlon"); /* RACMO */
  else if((rcd=nco_inq_varid_flg(in_id,"global_longitude0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("global_longitude0"); /* Oxford NB: Must search for global_* first */
  else if((rcd=nco_inq_varid_flg(in_id,"longitude0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("longitude0"); /* Oxford */
  else if((rcd=nco_inq_varid_flg(in_id,"CO_Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("CO_Longitude"); /* MLS */
  else if((rcd=nco_inq_varid_flg(in_id,"S1_Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("S1_Longitude"); /* GPM */
  else if((rcd=nco_inq_varid_flg(in_id,"xc",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("xc"); /* RTM */
  else if((rcd=nco_inq_varid_flg(in_id,"west_east",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("west_east"); /* StackOverflow question https://stackoverflow.com/questions/68896581 */
  else if((rcd=nco_inq_varid_flg(in_id,"gridlon_0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("gridlon_0"); /* NWS HRRR */
} /* !lon_ctr_id */
if(!lat_nm_in || !lon_nm_in){
(void)fprintf(stdout,"%s: ERROR %s unable to identify latitude and/or longitude variable.\nHINT: Potential causes and workarounds for this include: 1. Coordinate variables must be in the root directory (not in a group). If this might be the problem, try to \"flatten\" the input file before regridding it (see http://nco.sf.net/nco.html#flatten). 2. Horizontal dimensions with \"unusual\" names are hard to identify unless the user designates them somehow. ncremap will search for horizontal dimensions named in the \"coordinates\" attribute in a template variable specified with the \"-V rgr_var\" option. 3. NCO will also search its own internal database for likely names of horizontal coordinate variables (lat, latitude, LAT, XLAT, etc.). Contact the NCO project to have your idiosyncratic coordinate names added to the internal database.\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* !lat_nm_in */
/* Rank of coordinates determines whether grid is curvilinear */
rcd+=nco_inq_varndims(in_id,lat_ctr_id,&lat_rnk);
rcd+=nco_inq_varndims(in_id,lon_ctr_id,&lon_rnk);
/* If lat_ctr and lon_ctr share same and only dimension then grid is unstructured */
if(lat_rnk*lon_rnk == 1){
rcd+=nco_inq_vardimid(in_id,lat_ctr_id,&dmn_id_lat);
rcd+=nco_inq_vardimid(in_id,lon_ctr_id,&dmn_id_lon);
if(dmn_id_lat == dmn_id_lon){
dmn_id_col=dmn_id_lat;
dmn_id_lat=NC_MIN_INT;
dmn_id_lon=NC_MIN_INT;
rcd+=nco_inq_dimname(in_id,dmn_id_col,dmn_nm);
col_dmn_nm=(char *)strdup(dmn_nm);
flg_grd_1D=True;
} /* !unstructured */
} /* lat_rnk == lon_rnk == 1 */
if(lat_rnk*lon_rnk == 1 && dmn_id_lat != NC_MIN_INT && dmn_id_lon != NC_MIN_INT){
flg_grd_crv=False;
flg_grd_2D=True;
} /* !lat_rnk */
if(lat_rnk == dmn_nbr_2D || lon_rnk == dmn_nbr_2D){
flg_grd_crv=True;
flg_grd_2D=False;
} /* !lat_rnk */
if(lat_rnk > dmn_nbr_2D || lon_rnk > dmn_nbr_2D){
(void)fprintf(stdout,"%s: ERROR %s reports an identified grid variable (%s with rank %d and/or %s with rank %d) has rank greater than two---grid variables currently must have rank 1 or 2.\nHINT: If grid variables do not vary in time, then temporally average them (with, e.g., ncwa -a time in.nc out.nc) prior to inferring grid\n",nco_prg_nm_get(),fnc_nm,lat_nm_in,lat_rnk,lon_nm_in,lon_rnk);
nco_exit(EXIT_FAILURE);
} /* !3D */
if(lat_rnk*lon_rnk != 1 && lat_rnk*lon_rnk != 4) assert(False);
/* Scrutinize coordinates for their dimensions
NB: Unstructured already known */
if(flg_grd_2D){
rcd+=nco_inq_dimname(in_id,dmn_id_lat,dmn_nm);
lat_dmn_nm=(char *)strdup(dmn_nm);
rcd+=nco_inq_dimname(in_id,dmn_id_lon,dmn_nm);
lon_dmn_nm=(char *)strdup(dmn_nm);
} /* !flg_grd_2D */
if(flg_grd_crv){
rcd+=nco_inq_vardimid(in_id,lat_ctr_id,dmn_ids);
/* fxm: use cf struct and match with units name, if any? normally curvilinear grid dimensions are just pixel dimensions that are not aligned north-south or east-west */
dmn_id_lat=dmn_ids[0];
dmn_id_lon=dmn_ids[1];
rcd+=nco_inq_dimname(in_id,dmn_id_lat,dmn_nm);
lat_dmn_nm=(char *)strdup(dmn_nm);
rcd+=nco_inq_dimname(in_id,dmn_id_lon,dmn_nm);
lon_dmn_nm=(char *)strdup(dmn_nm);
} /* !flg_grd_crv */
if(!(lat_dmn_nm && lon_dmn_nm) && !col_dmn_nm){
(void)fprintf(stdout,"%s: ERROR %s unable to identify latitude and/or longitude dimension and/or column dimension.\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* !col_dmn_nm !lat_dmn_nm !lon_dmn_nm */
/* Locate spatial dimensions that may be present
NB: bounds dimensions may present a special problem
CAM-FV and CAM-SE use nbnd for temporal bounds and have no spatial bounds dimension
CAM3 uses tbnd for temporal bounds and has no spatial bounds dimension
CICE and POP use d2 for temporal bounds, and CICE uses nvertices for spatial bounds while POP uses nothing
Hence search for nvertices before nbnd to ensure spatial bound is found first */
if((rcd=nco_inq_dimid_flg(in_id,"nv",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("nv"); /* fxm */
else if((rcd=nco_inq_dimid_flg(in_id,"nvertices",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("nvertices"); /* CICE */
else if((rcd=nco_inq_dimid_flg(in_id,"maxEdges",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("maxEdges"); /* MPAS */
if((rcd=nco_inq_dimid_flg(in_id,"nVertices",&dmn_id_vrt)) == NC_NOERR) vrt_dmn_nm=strdup("nVertices"); /* MPAS */
/* Use dimension IDs to get dimension sizes and grid size */
if(flg_grd_1D){
rcd+=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr);
lat_nbr=lon_nbr=col_nbr;
}else{
rcd+=nco_inq_dimlen(in_id,dmn_id_lat,&lat_nbr);
rcd+=nco_inq_dimlen(in_id,dmn_id_lon,&lon_nbr);
col_nbr=NC_MIN_INT;
} /* !flg_grd_1D */
if(dmn_id_bnd != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_bnd,&grd_crn_nbr);
if(dmn_id_bnd != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_bnd,&bnd_nbr);
if(dmn_id_vrt != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_vrt,&vrt_nbr);
if(flg_grd_1D){
/* Unstructured grid (e.g., CAM-SE) */
grd_rnk_nbr=dmn_nbr_1D;
grd_typ=nco_grd_2D_unk;
lat_typ=nco_grd_lat_unk;
lon_typ=nco_grd_lon_unk;
/* 1D grids without their own boundaries are at the mercy of the weight generator */
if(dmn_id_bnd == NC_MIN_INT){
(void)fprintf(stdout,"%s: WARNING %s reports an unstructured grid without spatial boundary information. NCO can copy but not infer spatial boundaries from unstructured grids. Thus NCO will not write spatial bounds to the gridfile inferred from this input file. Instead, the weight generator that ingests this gridfile must generate weights for gridcells with unknown spatial extent. This is feasible for grids and mappings where weights masquerade as areas and are determined by underlying grid and interpolation type (e.g., bilinear remapping of spectral element grid). Unfortunately, the ESMF_RegridWeightGen (ERWG) program requires cell interfaces in both grid files, so ERWG will break on this gridfile. Other weight generators such as TempestRemap may be more successful with this SCRIP file.\n",nco_prg_nm_get(),fnc_nm);
(void)fprintf(stdout,"%s: HINT Re-run the regridder, this time adding the \"-s src_grd\" option to specify the source grid file in SCRIP format. That SCRIP file will have the spatial bounds information required by the ESMF_RegridWeightGen (ERWG) program, so that the regridder will circumvent inferring the underlying grid through its black but fragile magic.\n",nco_prg_nm_get());
flg_wrt_crn=False;
/* Input could actually be from grid with no polygonal definition, e.g., CAM-SE
Corner number is non-deterministic since, e.g., CAM-SE dual grid can be fit to quadrilaterals, pentagons, chevrons, etc.
Bounds will not be diagnosed so safe to set grd_crn_nbr to harmless (though weird) value like 4
However, ERWG requires presence of valid corner dimension "grid_corners" and arrays in input SCRIP file
So ERWG will break when reading this SCRIP file regardless of whether it contains arrays (with bogus values)
By default do not write grid corner values */
grd_crn_nbr=4;
} /* !dmn_id_bnd */
if(bnd_nbr == 2){
/* Unstructured grids with bounds information (e.g., OCO2) may use a pseudo-rectangular convention of archiving
latitude and longitude bounds as 2xN (rather than 4XN) arrays even though cell have four corners.
"convention" is that two latitudes and two longitudes can specify rectangular boundary cell
In this case, bnd_nbr=grd_crn_nbr=2=sizeof(nv)=sizeof(nvertices) currently
Set number of corners to rectangular and leave bnd_nbr as is */
grd_crn_nbr=4;
flg_1D_psd_rct_bnd=True;
} /* !bnd_nbr */
/* NB: bnd_dmn_nm remains NULL when no bounds dimension (nv, nvertices, maxEdges) was identified above
   (the dmn_id_bnd == NC_MIN_INT branch just above handles exactly that case), so guard the strcmp()
   to avoid undefined behavior from a NULL dereference on bounds-free unstructured grids */
if(bnd_dmn_nm && !strcmp(bnd_dmn_nm,"maxEdges")){
  if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Unstructured grid has dimension \"%s\" which indicates an MPAS grid. Will attempt to locate other MPAS information (dimension nVertices and variables verticesOnCell, lonVertex, and latVertex) to construct SCRIP-compliant bounds variables...\n",nco_prg_nm_get(),bnd_dmn_nm);
  /* Locate the three MPAS variables needed to reconstruct cell corners */
  if((rcd=nco_inq_varid_flg(in_id,"verticesOnCell",&vrt_cll_id)) == NC_NOERR) vrt_cll_nm=strdup("verticesOnCell");
  if((rcd=nco_inq_varid_flg(in_id,"lonVertex",&vrt_lon_id)) == NC_NOERR) vrt_lon_nm=strdup("lonVertex");
  if((rcd=nco_inq_varid_flg(in_id,"latVertex",&vrt_lat_id)) == NC_NOERR) vrt_lat_nm=strdup("latVertex");
  if(dmn_id_vrt != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_vrt,&vrt_nbr);
  if(vrt_dmn_nm && vrt_cll_nm && vrt_lon_nm && vrt_lat_nm){
    flg_1D_mpas_bnd=True;
    if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Found all MPAS information needed to construct SCRIP-compliant bounds variables.\n",nco_prg_nm_get());
  }else{
    (void)fprintf(stdout,"%s: INFO Unable to find all MPAS information needed to construct SCRIP-compliant bounds variables. Will not write bounds coordinates. This will degrade usefulness of SCRIP file for regridding schemes (e.g., conservative) that require cell boundaries.\n",nco_prg_nm_get());
    (void)fprintf(stdout,"%s: HINT Often MPAS restart files contain the required bounds variables (verticesOnCell, lonVertex, latVertex) that normal MPAS data files lack. Try inferring the SCRIP grid from a restart file not a normal time-varying output dataset.\n",nco_prg_nm_get());
    flg_wrt_crn=False;
  } /* !vrt_cll_nm */
} /* !bnd_dmn_nm */
}else if(flg_grd_2D){ /* !flg_grd_1D */
/* Assume 2D grid of uninitialized type */
grd_rnk_nbr=dmn_nbr_2D;
grd_typ=nco_grd_2D_nil;
lat_typ=nco_grd_lat_nil;
lon_typ=nco_grd_lon_nil;
/* Assume rectangular grids that do not specify otherwise use quadrilaterals */
if(dmn_id_bnd == NC_MIN_INT) grd_crn_nbr=4;
/* Sometimes we infer from a 2D grid, like those produced by nco_grd_mk(), that has bounds with nv=2
This signals rectangular gridcell bounds are interfaces not vertices (to save half the space)
These rectangles really have four corners so we change grd_crn_nbr (not bnd_nbr) accordingly */
if(grd_crn_nbr == 2) grd_crn_nbr=4;
/* Convention is to archive only two bounds for rectangular grids (since sides are identical)
Non-quadrilateral rectangular grids are untested */
if(grd_crn_nbr == 4) bnd_nbr=2;
}else if(flg_grd_crv){ /* !flg_grd_2D */
/* Assume curvilinear grid (e.g., WRF) */
flg_grd_2D=False;
grd_rnk_nbr=dmn_nbr_2D;
grd_typ=nco_grd_2D_unk;
lat_typ=nco_grd_lat_unk;
lon_typ=nco_grd_lon_unk;
/* Assume curvilinear grids that do not specify otherwise use quadrilaterals */
if(dmn_id_bnd == NC_MIN_INT) grd_crn_nbr=4;
/* Assume quadrilaterals are, well, quadrilaterals (e.g., rhomboids) not necessarily rectangles
Non-quadrilateral curvilinear grids are untested */
if(grd_crn_nbr == 4) bnd_nbr=4; else assert(False);
} /* !flg_grd_crv */
/* Allocate space for output data */
if(flg_grd_1D) grd_sz_nbr=col_nbr; else grd_sz_nbr=lat_nbr*lon_nbr;
dmn_sz_int=(int *)nco_malloc(grd_rnk_nbr*nco_typ_lng((nc_type)NC_INT));
area=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
msk=(int *)nco_malloc(grd_sz_nbr*nco_typ_lng((nc_type)NC_INT));
if(flg_grd_1D){
if(bnd_nbr != NC_MIN_INT) lat_bnd=(double *)nco_malloc(grd_sz_nbr*bnd_nbr*nco_typ_lng(crd_typ));
lat_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lat_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ));
lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
if(bnd_nbr != NC_MIN_INT) lon_bnd=(double *)nco_malloc(grd_sz_nbr*bnd_nbr*nco_typ_lng(crd_typ));
lon_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lon_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ));
}else if(flg_grd_2D){ /* !flg_grd_1D */
lat_bnd=(double *)nco_malloc(lat_nbr*bnd_nbr*nco_typ_lng(crd_typ));
lat_crn=(double *)nco_malloc(lat_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lat_ctr=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ));
lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
lon_bnd=(double *)nco_malloc(lon_nbr*bnd_nbr*nco_typ_lng(crd_typ));
lon_crn=(double *)nco_malloc(lon_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lon_ctr=(double *)nco_malloc(lon_nbr*nco_typ_lng(crd_typ));
lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ));
}else if(flg_grd_crv){ /* !flg_grd_2D */
lat_bnd=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lat_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lat_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ));
lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
lon_bnd=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lon_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lon_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ));
} /* !flg_grd_crv */
grd_ctr_lat=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
grd_ctr_lon=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
grd_crn_lat=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ));
grd_crn_lon=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ));
/* Locate fields that may be present in input file */
if((rcd=nco_inq_varid_flg(in_id,"lat_bnds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_bnds");
else if((rcd=nco_inq_varid_flg(in_id,"latt_bounds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latt_bounds");
else if((rcd=nco_inq_varid_flg(in_id,"latu_bounds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latu_bounds");
else if((rcd=nco_inq_varid_flg(in_id,"lat_ntf",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_ntf");
else if((rcd=nco_inq_varid_flg(in_id,"lat_vertices",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_vertices");
else if((rcd=nco_inq_varid_flg(in_id,"latitude_bnds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latitude_bnds"); /* OCO2 */
else if((rcd=nco_inq_varid_flg(in_id,"LatitudeCornerpoints",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("LatitudeCornerpoints"); /* OMI */
if((rcd=nco_inq_varid_flg(in_id,"lon_bnds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_bnds");
else if((rcd=nco_inq_varid_flg(in_id,"lont_bounds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lont_bounds");
else if((rcd=nco_inq_varid_flg(in_id,"lonu_bounds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lonu_bounds");
else if((rcd=nco_inq_varid_flg(in_id,"lon_ntf",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_ntf");
else if((rcd=nco_inq_varid_flg(in_id,"lon_vertices",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_vertices");
else if((rcd=nco_inq_varid_flg(in_id,"longitude_bnds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("longitude_bnds"); /* OCO2 */
else if((rcd=nco_inq_varid_flg(in_id,"LongitudeCornerpoints",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("LongitudeCornerpoints"); /* OMI */
if((rcd=nco_inq_varid_flg(in_id,"area",&area_id)) == NC_NOERR) area_nm_in=strdup("area");
else if((rcd=nco_inq_varid_flg(in_id,"Area",&area_id)) == NC_NOERR) area_nm_in=strdup("Area");
else if((rcd=nco_inq_varid_flg(in_id,"areaCell",&area_id)) == NC_NOERR) area_nm_in=strdup("areaCell"); /* MPAS-O/I */
else if((rcd=nco_inq_varid_flg(in_id,"grid_area",&area_id)) == NC_NOERR) area_nm_in=strdup("grid_area");
else if((rcd=nco_inq_varid_flg(in_id,"area_d",&area_id)) == NC_NOERR) area_nm_in=strdup("area_d"); /* EAM dynamics grid */
else if((rcd=nco_inq_varid_flg(in_id,"area_p",&area_id)) == NC_NOERR) area_nm_in=strdup("area_p"); /* EAM physics grid */
// else if((rcd=nco_inq_varid_flg(in_id,"aice",&area_id)) == NC_NOERR) area_nm_in=strdup("aice"); /* CICE time-dependent ice area (3D), not total gridcell area */
else if((rcd=nco_inq_varid_flg(in_id,"tarea",&area_id)) == NC_NOERR) area_nm_in=strdup("tarea"); /* CICE time-invariant state-variable gridcell area (2D) */
else if((rcd=nco_inq_varid_flg(in_id,"uarea",&area_id)) == NC_NOERR) area_nm_in=strdup("uarea"); /* CICE time-invariant dynamics variables (2D) */
msk_nm_in=rgr->msk_var;
if(msk_nm_in){
if(!strcasecmp(msk_nm_in,"none")){
/* 20170814: Some variables named "*mask*" are, e.g., quality control masks not regridding masks per se */
msk_nm_in=(char *)nco_free(msk_nm_in);
}else{
/* User-supplied name overrides database */
rcd=nco_inq_varid(in_id,msk_nm_in,&msk_id);
} /* !msk_nm_in */
}else{
/* Otherwise search database */
if((rcd=nco_inq_varid_flg(in_id,"mask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("mask");
else if((rcd=nco_inq_varid_flg(in_id,"Mask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("Mask");
else if((rcd=nco_inq_varid_flg(in_id,"mask_b",&msk_id)) == NC_NOERR) msk_nm_in=strdup("mask_b");
else if((rcd=nco_inq_varid_flg(in_id,"grid_imask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("grid_imask");
else if((rcd=nco_inq_varid_flg(in_id,"landmask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("landmask"); /* ALM/CLM */
else if((rcd=nco_inq_varid_flg(in_id,"tmask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("tmask"); /* CICE */
} /* !msk_nm_in */
/* Mask field requires special handling for non-conformant models */
if(msk_id != NC_MIN_INT){
/* 20151201: All models tested define mask as NC_INT except CICE which uses NC_FLOAT
20160111: Few observations tested define mask. Exceptions include AMSR and GHRSST. AMSR uses NC_SHORT to store bitmasks. Bitmask is 1 for missing data, and up to 128 for various quality levels of valid data. Hence, almost better to ignore AMSR mask variable. GHRSST uses NC_BYTE for its 3D "mask" bit-mask of surface-type values 1,2,4,8,16. */
rcd=nco_inq_varndims(in_id,msk_id,&msk_rnk_nbr);
if(msk_rnk_nbr != grd_rnk_nbr && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports input mask variable \"%s\" is rank %d while grid is rank %ld so will use first timestep/layer to determine output mask\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,msk_rnk_nbr,grd_rnk_nbr);
rcd=nco_inq_vartype(in_id,msk_id,&msk_typ);
msk_unn.vp=(void *)nco_malloc(grd_sz_nbr*nco_typ_lng(msk_typ));
} /* !msk */
/* All grids:
Some real-world datasets violate convention that coordinates ought never have missing values
CICE lists missing value for lat/lon_ctr arrays (TLAT, TLONG) and re-uses that for bounds arrays (latt_bounds, lont_bounds) that do not bother to have their own missing value attributes
Without counter-example, assume has_mss_val_bnd=has_mss_val_ctr and mss_val_bnd_dbl=mss_val_ctr_dbl */
has_mss_val_bnd=has_mss_val_ctr=nco_mss_val_get_dbl(in_id,lat_ctr_id,&mss_val_ctr_dbl);
char *att_val;
char *area_unt=NULL; /* [sng] Dimensional units used in area */
char *ngl_unt=NULL; /* [sng] Angular units used in coordinates */
long att_sz;
nc_type att_typ;
nco_bool flg_area_sr=True; /* [flg] Input area is in sterradians not something weird like km2 */
nco_bool flg_crd_rdn=False; /* [flg] Input coordinates are in radians not degrees */
if(flg_grd_1D){
/* Obtain fields that must be present in unstructured input file */
dmn_srt[0]=0L;
dmn_cnt[0]=col_nbr;
rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ);
rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ);
/* Obtain fields that may be present in unstructured input file */
if(area_id != NC_MIN_INT) rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
if(msk_id != NC_MIN_INT){
if(msk_rnk_nbr > grd_rnk_nbr){
/* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... */
for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){
dmn_srt[dmn_idx]=0L;
dmn_cnt[dmn_idx]=1L;
} /* !dmn_idx */
dmn_srt[dmn_idx]=0L;
dmn_cnt[dmn_idx]=col_nbr;
} /* !msk_rnk_nbr */
rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ);
} /* !msk_id */
dmn_srt[0]=dmn_srt[1]=0L;
if(flg_1D_psd_rct_bnd){
dmn_cnt[0]=col_nbr;
dmn_cnt[1]=bnd_nbr;
if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ);
if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ);
}else if(flg_1D_mpas_bnd){
const long grd_crn_nbrm1=grd_crn_nbr-1L; /* [nbr] Number of corners in gridcell minus one */
vrt_cll=(int *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng((nc_type)NC_INT));
vrt_lat=(double *)nco_malloc(vrt_nbr*nco_typ_lng(crd_typ));
vrt_lon=(double *)nco_malloc(vrt_nbr*nco_typ_lng(crd_typ));
dmn_cnt[0]=col_nbr;
dmn_cnt[1]=grd_crn_nbr;
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports dimension sizes bnd_nbr=%ld, col_nbr=%ld, grd_crn_nbr=%ld, vrt_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,bnd_nbr,col_nbr,grd_crn_nbr,vrt_nbr);
if(vrt_cll_id != NC_MIN_INT) rcd=nco_get_vara(in_id,vrt_cll_id,dmn_srt,dmn_cnt,vrt_cll,(nc_type)NC_INT);
dmn_cnt[0]=vrt_nbr;
if(vrt_lat_id != NC_MIN_INT) rcd=nco_get_vara(in_id,vrt_lat_id,dmn_srt,dmn_cnt,vrt_lat,crd_typ);
if(vrt_lon_id != NC_MIN_INT) rcd=nco_get_vara(in_id,vrt_lon_id,dmn_srt,dmn_cnt,vrt_lon,crd_typ);
rcd=nco_inq_att_flg(in_id,vrt_lat_id,unt_sng,&att_typ,&att_sz);
if(rcd == NC_NOERR && att_typ == NC_CHAR){
att_val=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ));
rcd+=nco_get_att(in_id,vrt_lat_id,unt_sng,att_val,att_typ);
/* NUL-terminate attribute before using strstr() */
att_val[att_sz]='\0';
/* Match "radian" and "radians" */
if(strstr(att_val,"radian")) flg_crd_rdn=True;
if(att_val) ngl_unt=(char *)strdup(att_val);
if(att_val) att_val=(char *)nco_free(att_val);
} /* end rcd && att_typ */
for(col_idx=0;col_idx<col_nbr;col_idx++){
idx=col_idx*grd_crn_nbr;
for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){
ttl_idx=idx+crn_idx;
vrt_idx=vrt_cll[ttl_idx];
assert(vrt_idx >= 0);
//if(vrt_idx >= vrt_nbr) (void)fprintf(stdout,"%s: WARNING %s input gridcell %ld corner %ld has extreme MPAS input verticesOnCell value %ld (maximum valid vertex = vrt_nbr-1 = %ld-1 = %ld)\n",nco_prg_nm_get(),fnc_nm,col_idx,crn_idx,vrt_idx,vrt_nbr,vrt_nbr-1);
if(vrt_idx == 0){
/* 20201220: Convert values of zero to neighboring valid vertex index */
for(idx_fst=1;idx_fst<grd_crn_nbr;idx_fst++){
idx_tmp=crn_idx+idx_fst;
/* Wrap to initial corner of this cell when candidate corner would be in next cell */
if(idx_tmp > grd_crn_nbrm1) idx_tmp-=grd_crn_nbr;
ttl_idx=idx+idx_tmp;
vrt_idx=vrt_cll[ttl_idx];
if(vrt_idx != 0) break;
} /* !idx_fst */
assert(idx_fst < grd_crn_nbr);
} /* !vrt_idx */
/* 20201220: Stored vertex indices use Fortran-based convention---subtract one for C */
vrt_idx--;
lat_crn[ttl_idx]=vrt_lat[vrt_idx];
lon_crn[ttl_idx]=vrt_lon[vrt_idx];
//(void)fprintf(stdout,"%s: DEBUG %s reports col_idx = %ld, crn_idx = %ld, ttl_idx = %ld, vrt_idx = %ld, vrt_lat = %g, vrt_lon = %g\n",nco_prg_nm_get(),fnc_nm,col_idx,crn_idx,ttl_idx,vrt_idx,vrt_lat[vrt_idx],vrt_lon[vrt_idx]);
} /* !crn_idx */
} /* !col_idx */
}else{ /* !flg_1D_mpas_bnd */
dmn_cnt[0]=col_nbr;
dmn_cnt[1]=grd_crn_nbr;
if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_crn,crd_typ);
if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_crn,crd_typ);
} /* !flg_1D_psd_rct_bnd */
} /* !flg_grd_1D */
if(flg_grd_crv){
/* Obtain fields that must be present in curvilinear input file */
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=lat_nbr;
dmn_cnt[1]=lon_nbr;
rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ);
rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ);
/* 20150923: Also input, if present in curvilinear file, corners, area, and mask
area and mask are same size as lat and lon */
if(area_id != NC_MIN_INT) rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
if(msk_id != NC_MIN_INT){
if(msk_rnk_nbr > grd_rnk_nbr){
/* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... */
for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){
dmn_srt[dmn_idx]=0L;
dmn_cnt[dmn_idx]=1L;
} /* !dmn_idx */
dmn_srt[dmn_idx]=dmn_srt[dmn_idx+1]=0L;
dmn_cnt[dmn_idx]=lat_nbr;
dmn_cnt[dmn_idx+1]=lon_nbr;
} /* !msk_rnk_nbr */
rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ);
} /* !msk_id */
/* Corners are on curvilinear corner grid
Rectangular boundaries (i.e., lat_bnd=[lat_nbr,2]) DNE for curvilinear grids
Read-in *_crn arrays in curvilinear grids, and *_bnd arrays for rectilinear grids
Rank-ordering of corner arrays is usually lat_nbr,lon_nbr,grd_crn_nbr as produced/expected by SCRIP
However some datasets, e.g., OMI DOMINO use grd_crn_nbr,lat_nbr,lon_nbr
Sigh... */
dmn_srt[0]=dmn_srt[1]=dmn_srt[2]=0L;
if(lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){
rcd=nco_inq_vardimid(in_id,lat_bnd_id,dmn_ids);
if((dmn_ids[0] == dmn_id_lat && dmn_ids[1] == dmn_id_lon) || (dmn_ids[0] == dmn_id_lon && dmn_ids[1] == dmn_id_lat)){
dmn_id_bnd=dmn_ids[2];
dmn_cnt[0]=lat_nbr;
dmn_cnt[1]=lon_nbr;
dmn_cnt[2]=grd_crn_nbr;
}else if((dmn_ids[1] == dmn_id_lat && dmn_ids[2] == dmn_id_lon) || (dmn_ids[1] == dmn_id_lon && dmn_ids[2] == dmn_id_lat)){
dmn_id_bnd=dmn_ids[0];
dmn_cnt[0]=grd_crn_nbr;
dmn_cnt[1]=lat_nbr;
dmn_cnt[2]=lon_nbr;
flg_crn_grd_lat_lon=True;
}else{
(void)fprintf(stdout,"%s: WARNING %s confused by dimension-ordering of latitude bounds variable \"%s\". Will ignore this bounds variable and attempt to extrapolate vertices from centers internally...\n",nco_prg_nm_get(),fnc_nm,lat_nm_in);
lat_bnd_id=NC_MIN_INT;
lon_bnd_id=NC_MIN_INT;
} /* !dmn_ids */
rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_crn,crd_typ);
rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_crn,crd_typ);
if(flg_crn_grd_lat_lon){
/* Permute corner arrays from non-canonical (grd_nbr,lat_nbr,lon_nbr) to canonical (lat_nbr,lon_nbr,grd_nbr) order */
double *lat_crn_tmp=NULL;
double *lon_crn_tmp=NULL;
lat_crn_tmp=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lon_crn_tmp=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
memcpy(lat_crn_tmp,lat_crn,grd_sz_nbr*grd_crn_nbr*sizeof(double));
memcpy(lon_crn_tmp,lon_crn,grd_sz_nbr*grd_crn_nbr*sizeof(double));
for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){
for(idx=0;idx<grd_sz_nbr;idx++){
lat_idx=idx/lon_nbr;
lon_idx=idx%lon_nbr;
/* NB: Variables differ (lat vs. lon) but indexes are identical in next two lines */
lat_crn[lat_idx*lon_nbr*grd_crn_nbr+lon_idx*grd_crn_nbr+crn_idx]=lat_crn_tmp[crn_idx*grd_sz_nbr+idx];
lon_crn[lat_idx*lon_nbr*grd_crn_nbr+lon_idx*grd_crn_nbr+crn_idx]=lon_crn_tmp[crn_idx*grd_sz_nbr+idx];
} /* !idx */
} /* !crn_idx */
if(lat_crn_tmp) lat_crn_tmp=(double *)nco_free(lat_crn_tmp);
if(lon_crn_tmp) lon_crn_tmp=(double *)nco_free(lon_crn_tmp);
/* In this code branch, thought to be executed only for OMI DOMINO grids, re-compute grid center arrays (known to contain missing values) as centroids of supplied grid corners */
for(idx=0;idx<grd_sz_nbr;idx++){
lat_idx=idx/lon_nbr;
lon_idx=idx%lon_nbr;
lat_ctr[idx]=0.25*(lat_crn[idx*grd_crn_nbr+0L]+lat_crn[idx*grd_crn_nbr+1L]+lat_crn[idx*grd_crn_nbr+2L]+lat_crn[idx*grd_crn_nbr+3L]);
lon_ctr[idx]=nco_lon_crn_avg_brnch(lon_crn[idx*grd_crn_nbr+0L],lon_crn[idx*grd_crn_nbr+1L],lon_crn[idx*grd_crn_nbr+2L],lon_crn[idx*grd_crn_nbr+3L]);
} /* !idx */
      } /* !flg_crn_grd_lat_lon */
} /* !lat_bnd_id */
} /* !flg_grd_crv */
if(flg_grd_2D){
int lon_psn_in=1L; /* [idx] Ordinal position of longitude dimension in rectangular grid variables like area */
int lat_psn_in=0L; /* [idx] Ordinal position of latitude dimension in rectangular grid variables like area */
int tpl_id=NC_MIN_INT; /* [id] ID of template field */
/* Obtain fields that must be present in input file */
dmn_srt[0L]=0L;
dmn_cnt[0L]=lat_nbr;
rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ);
dmn_srt[0L]=0L;
dmn_cnt[0L]=lon_nbr;
rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ);
if(lat_ctr[1L] < lat_ctr[0L]) flg_s2n=False;
/* Use fields that may be present in input file to override, if necessary, default lon/lat order
area and mask are both suitable templates for determining input lat/lon ordering
NB: Algorithm assumes area is same rank as grid, and falls-back to mask if that has same rank as grid */
if(area_id != NC_MIN_INT) tpl_id=area_id;
else if(msk_id != NC_MIN_INT && msk_rnk_nbr == grd_rnk_nbr) tpl_id=msk_id;
if(tpl_id != NC_MIN_INT){
int tpl_rnk_nbr;
var_id=tpl_id;
/* NB: Template variable rank may exceed two with --msk_[src/dst] (e.g., SST(time,lat,lon)) */
rcd=nco_inq_varndims(in_id,var_id,&tpl_rnk_nbr);
rcd=nco_inq_vardimid(in_id,var_id,dmn_ids);
/* fxm: Optimize discovery of lat/lon ordering */
for(dmn_idx=0;dmn_idx<grd_rnk_nbr;dmn_idx++){
rcd=nco_inq_dimname(in_id,dmn_ids[dmn_idx],dmn_nm);
rcd+=nco_inq_dimlen(in_id,dmn_ids[dmn_idx],&dmn_sz);
if(!strcmp(dmn_nm,lat_dmn_nm)){
assert(dmn_sz == lat_nbr);
assert(dmn_idx == 0);
lat_psn_in=dmn_idx;
} /* !lat */
if(!strcmp(dmn_nm,lon_dmn_nm)){
assert(dmn_sz == lon_nbr);
assert(dmn_idx == 1);
lon_psn_in=dmn_idx;
} /* !lon */
} /* !dmn_idx */
} /* !tpl */
/* Obtain fields that may be present in input file */
if(area_id != NC_MIN_INT){
var_id=area_id;
rcd=nco_inq_vardimid(in_id,var_id,dmn_ids);
dmn_srt[lat_psn_in]=0L;
dmn_cnt[lat_psn_in]=lat_nbr;
dmn_srt[lon_psn_in]=0L;
dmn_cnt[lon_psn_in]=lon_nbr;
rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
} /* !area */
if(msk_id != NC_MIN_INT){
var_id=msk_id;
rcd=nco_inq_vardimid(in_id,var_id,dmn_ids);
dmn_srt[lat_psn_in]=0L;
dmn_cnt[lat_psn_in]=lat_nbr;
dmn_srt[lon_psn_in]=0L;
dmn_cnt[lon_psn_in]=lon_nbr;
if(msk_rnk_nbr != grd_rnk_nbr){
/* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... */
for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){
dmn_srt[dmn_idx]=0L;
dmn_cnt[dmn_idx]=1L;
} /* !dmn_idx */
dmn_srt[dmn_idx]=dmn_srt[dmn_idx+1]=0L;
dmn_cnt[dmn_idx+lat_psn_in]=lat_nbr;
dmn_cnt[dmn_idx+lon_psn_in]=lon_nbr;
} /* !msk_rnk_nbr */
rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ);
} /* !msk */
/* Rectangular boundaries are often on "abbreviated" bounds grid (two bounds per center)
Read-in *_crn arrays for 1D and curvilinear grids, and *_bnd arrays for rectilinear grids */
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=lat_nbr;
dmn_cnt[1]=bnd_nbr;
if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=lon_nbr;
dmn_cnt[1]=bnd_nbr;
if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ);
} /* !flg_grd_2D */
/* Obtain units, if any, of input area */
if(area_id != NC_MIN_INT){
rcd=nco_inq_att_flg(in_id,area_id,unt_sng,&att_typ,&att_sz);
if(rcd == NC_NOERR && att_typ == NC_CHAR){
att_val=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ));
rcd+=nco_get_att(in_id,area_id,unt_sng,att_val,att_typ);
/* NUL-terminate attribute before using strstr() */
att_val[att_sz]='\0';
if(!strcasestr(att_val,"radian")) flg_area_sr=False;
if(att_val) area_unt=(char *)strdup(att_val);
if(att_val) att_val=(char *)nco_free(att_val);
} /* end rcd && att_typ */
} /* !area_id */
/* Additional information that may be required for any input grid */
if(area_id != NC_MIN_INT) has_mss_val_area=nco_mss_val_get_dbl(in_id,area_id,&mss_val_area_dbl);
if(msk_id != NC_MIN_INT) has_mss_val_msk=nco_mss_val_get_dbl(in_id,msk_id,&mss_val_msk_dbl);
/* 20160115: AMSR coordinates are packed as NC_SHORT with scale_value=0.01f. What to do? Is it worth unpacking everything? */
int flg_pck; /* [flg] Variable is packed on disk */
rcd=nco_inq_var_packing(in_id,lat_ctr_id,&flg_pck);
if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports lat_ctr variable \"%s\" is packed so results unpredictable. HINT: If grid-generation causes problems, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,lat_nm_in);
rcd=nco_inq_var_packing(in_id,lon_ctr_id,&flg_pck);
if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports lon_ctr variable \"%s\" is packed so results unpredictable. HINT: If grid-generation causes problems, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,lon_nm_in);
/* Close input netCDF file */
nco_close(in_id);
/* Remove local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in);
/* Above this line, fl_in and in_id refer to input file to be regridded
Below this line, fl_out and out_id refer to grid-file to be output */
dfl_lvl=rgr->dfl_lvl;
fl_out=rgr->fl_grd;
fl_out_fmt=rgr->fl_out_fmt;
if(!fl_out){
(void)fprintf(stdout,"%s: ERROR %s filename for inferred SCRIP grid-file is uninitialized, supply it with \"ncks --rgr grid=filename.nc\" or \"ncremap -R '--rgr grid=filename.nc'\"\n",nco_prg_nm_get(),fnc_nm);
(void)fprintf(stdout,"%s: HINT ncremap supplies an automatically generated default name for any output SCRIP grid-file. Users of the standalone regridder (ncks) must explicitly specify a name for the inferred SCRIP grid-file.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* !fl_out */
/* Define output variable values */
int lon_psn; /* [idx] Ordinal position of longitude dimension in rectangular grid dimension-size array */
int lat_psn; /* [idx] Ordinal position of latitude dimension in rectangular grid dimension-size array */
if(grd_rnk_nbr == dmn_nbr_1D){
dmn_sz_int[0]=col_nbr;
}else if(grd_rnk_nbr == dmn_nbr_2D){ /* !dmn_nbr_1D */
/* SCRIP introduced [lon,lat] convention because more natural for Fortran
NB: This [lon,lat] convention applies ONLY to grid_dims variable
Write all other SCRIP variables as [lat,lon]
Nonsensical? Yes, but backwards compatibility is priceless */
lon_psn=0;
lat_psn=1;
dmn_sz_int[lon_psn]=lon_nbr;
dmn_sz_int[lat_psn]=lat_nbr;
} /* !dmn_nbr_2D */
if(flg_grd_crv){
/* For curvilinear grids first, if necessary, infer corner boundaries
Then perform sanity check using same code on inferred and copied grids */
if(False && has_mss_val_bnd && grd_crn_nbr == 4 && !strcmp(lat_bnd_nm,"latt_bounds") && !strcmp(lon_bnd_nm,"lont_bounds") && lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){
/* Only CESM CICE is known to fit these constraints
Cell center locations are (misleadingly) reported in a regular, rectangular, regional grid
Cell corners/boundaries are regular only in SH, curvilinear in NH, i.e., displaced or tripole grid
Grid is from southernmost Antarctic Ocean latitude and longitude near 79S,320E to North Pole
Nominal centers do not agree with true centers computed from corners
CICE may run in decomposed/unstructured mode, each column writes separately to output buffer?
This could explain missing coordinates in non-ocean gridcells
However, land points are completely masked (grid centers and corners are missing)
Oversight? Why not write coordinates for land-masked cells?
Regridder needs corners so we fill-in missing boundaries with derived grid
Gave up on inferring 20170521 once tri-pole grid complexity became apparent */
const long idx_dbg=rgr->idx_dbg;
double lat_ctr_drv; /* [dgr] Latitude center, derived */
double lon_ctr_drv; /* [dgr] Longitude center, derived */
double lat_crn_drv; /* [dgr] Latitude corner, derived */
double lon_crn_drv; /* [dgr] Longitude corner, derived */
long idx_ctr_sth; /* [idx] Index of southern neighbor */
long idx_ctr_nrt; /* [idx] Index of northern neighbor */
long idx_crn_sth; /* [idx] Index of southern neighbor */
long idx_crn_nrt; /* [idx] Index of northern neighbor */
long lon_idx_crr; /* [idx] Current longitude index */
long lon_vld_frs; /* [idx] First valid longitude in latitude row */
long *lon_vld_prv=NULL; /* [idx] Previous valid longitude in latitude row */
long *lon_vld_nxt=NULL; /* [idx] Next valid longitude in latitude row */
lon_vld_prv=(long *)nco_malloc(lon_nbr*sizeof(long));
lon_vld_nxt=(long *)nco_malloc(lon_nbr*sizeof(long));
/* First valid gridcell sets west and south bounds of entire grid */
for(idx_ctr=0;idx_ctr<grd_sz_nbr;idx_ctr++){
if(lat_ctr[idx_ctr] != mss_val_ctr_dbl) break;
} /* !grd_sz_nbr */
assert(idx_ctr != grd_sz_nbr);
idx_crn=idx_ctr*grd_crn_nbr;
lat_sth=lat_crn[idx_crn];
lat_ncr=lat_crn[idx_crn+3]-lat_crn[idx_crn]; /* ul-ll */
lon_wst=lon_crn[idx_crn];
lon_ncr=lon_crn[idx_crn+1]-lon_crn[idx_crn]; /* lr-ll */
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s will assume grid is regional CICE in curvilinear format with masked land. Will diagnose missing cell boundaries and centers from present boundaries and centers in grid of size lat_nbr=%ld, lon_nbr=%ld.\n",nco_prg_nm_get(),fnc_nm,lat_nbr,lon_nbr);
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){
idx_ctr=lat_idx*lon_nbr;
/* Find first valid longitude at this latitude */
for(lon_idx=0;lon_idx<lon_nbr;lon_idx++)
if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break;
lon_vld_frs=lon_idx;
/* 20170519: Verified all tri-pole grid latitudes have at least one valid point */
if(lon_vld_frs == -1L) abort();
for(lon_idx_crr=0;lon_idx_crr<lon_nbr;lon_idx++){
/* Find previous and next valid longitude for all longitudes at this latitude
Cells can be their own previous/next valid longitude */
lon_vld_prv[lon_idx_crr]=-1L;
lon_vld_nxt[lon_idx_crr]=-1L;
/* Start from current longitude and move left (west)... */
for(lon_idx=lon_idx_crr;lon_idx>=0;lon_idx--)
if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break;
if(lon_idx >= 0) lon_vld_prv[lon_idx_crr]=lon_idx;
/* Start from current longitude and move right (east)... */
for(lon_idx=lon_idx_crr;lon_idx<lon_nbr;lon_idx++)
if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break;
if(lon_idx < lon_nbr) lon_vld_nxt[lon_idx_crr]=lon_idx;
/* Wrap west if previous valid cell not found */
lon_vld_prv[lon_idx_crr]=lon_vld_prv[lon_nbr-1L];
/* Wrap east if next valid cell not found */
lon_vld_nxt[lon_idx_crr]=lon_vld_nxt[0];
} /* !lon_idx_crr */
/* Derive centers and corners for each missing point */
for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){
idx_ctr=lat_idx*lon_nbr+lon_idx;
idx_crn=idx_ctr*grd_crn_nbr;
if(lat_ctr[idx_ctr] != mss_val_ctr_dbl){
lat_sth=lat_crn[idx_crn];
lat_ncr=lat_crn[idx_crn+3]-lat_crn[idx_crn]; /* ul-ll */
lat_ctr_drv=lat_sth+0.5*lat_ncr;
lat_crn_drv=lat_sth;
lon_wst=lon_crn[idx_crn];
lon_ncr=lon_crn[idx_crn+1]-lon_crn[idx_crn]; /* lr-ll */
lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5);
if(nco_dbg_lvl_get() >= nco_dbg_std && idx_ctr == idx_dbg) (void)fprintf(stdout,"%s: DEBUG %s idx=%ld lat_idx=%ld, lon_idx=%ld, lat_sth=%g, lat_ncr=%g, lon_wst=%g, lon_ncr=%g\n",nco_prg_nm_get(),fnc_nm,idx_ctr,lat_idx,lon_idx,lat_sth,lat_ncr,lon_wst,lon_ncr);
} /* !idx_ctr */
if(lat_ctr[idx_ctr] == mss_val_ctr_dbl){
if(lat_idx != 0L){
/* Not bottom row */
idx_ctr_sth=idx_ctr-lon_nbr;
if(lat_ctr[idx_ctr_sth] != mss_val_ctr_dbl){
/* Copy southern corners from northern corners of southern neighbor */
idx_crn_sth=idx_ctr_sth*grd_crn_nbr;
lat_crn[idx_crn+0L]=lat_crn[idx_crn_sth+3L];
lat_crn[idx_crn+1L]=lat_crn[idx_crn_sth+2L];
lon_crn[idx_crn+0L]=lon_crn[idx_crn_sth+3L];
lon_crn[idx_crn+1L]=lon_crn[idx_crn_sth+2L];
} /* !mss_val */
} /* !lat_idx */
if(lat_idx != lat_nbr-1L){
/* Not top row */
idx_ctr_nrt=idx_ctr+lon_nbr;
if(lat_ctr[idx_ctr_nrt] != mss_val_ctr_dbl){
/* Copy northern corners from southern corners of northern neighbor */
idx_crn_nrt=idx_ctr_nrt*grd_crn_nbr;
lat_crn[idx_crn+2L]=lat_crn[idx_crn_nrt+1L];
lat_crn[idx_crn+3L]=lat_crn[idx_crn_nrt+0L];
lon_crn[idx_crn+2L]=lon_crn[idx_crn_nrt+1L];
lon_crn[idx_crn+3L]=lon_crn[idx_crn_nrt+0L];
} /* !mss_val */
} /* !lat_idx */
/* Got to here before giving up
Idea was to interpolate missing cell corners between previous and next valid cell */
/* Algorithm assumes lon_wst never changes (too simple for displaced/tri_pole) */
lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5);
lon_crn_drv=lon_wst+lon_ncr*lon_idx;
if(lon_ctr_drv >= 360.0) lon_ctr_drv-=360.0;
lat_ctr[idx_ctr]=lat_ctr_drv;
lon_ctr[idx_ctr]=lon_ctr_drv;
lat_crn[idx_crn+0L]=lat_crn[idx_crn+1L]=lat_crn_drv;
lat_crn[idx_crn+2L]=lat_crn[idx_crn+3L]=lat_crn_drv+lat_ncr;
lon_crn[idx_crn+0L]=lon_crn[idx_crn+3L]=lon_crn_drv;
lon_crn[idx_crn+1L]=lon_crn[idx_crn+2L]=lon_crn_drv+lon_ncr;
/* Branch-cut rule */
if(lon_crn_drv+lon_ncr >= 360.0){
lon_crn[idx_crn+0L]=lon_crn[idx_crn+3L]=lon_crn_drv-360.0;
lon_crn[idx_crn+1L]=lon_crn[idx_crn+2L]=lon_crn_drv+lon_ncr-360.0;
} /* !brnch */
} /* !mss_val */
} /* !lon_idx */
} /* !lat_idx */
if(lon_vld_nxt) lon_vld_nxt=(long *)nco_free(lon_vld_nxt);
if(lon_vld_prv) lon_vld_prv=(long *)nco_free(lon_vld_prv);
} /* !False || !CICE */
if(lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT){
/* Interfaces (ntf) and boundaries (bnd) for curvilinear grids are ill-defined since sides need not follow latitudes nor meridians
Simplest representation that contains equivalent information to interfaces/boundaries is grid corners array
Diagnose grid corners from midpoints
Most curvilinear data (e.g., WRF) is dimensioned lat x lon unlike SCRIP which uses lon x lat
Hence we keep lat_ctr, lon_ctr, lat_crn, lon_crn with same order (likely lat x lon) as data file from which we infer grid
Always use input order to write skeleton file
Change that order, if necessary, to write SCRIP grid file
In the interior of a curvilinear grid, nine points contribute to the four corners of a quadrilateral surrounding each center point
These are the three points above the point, the three points at the same latitude, and the three points beneath the point
In other words, a nine-point stencil is required to define the four corners inferred around each gridcell center
It is cleanest to use this stencil only once for all cells in the "real"-grid, including those on the edges, not the interior
For this to work cleanly we define an enlarged "fake"-grid where we pre-copy the values that lead to the desired extrapolation on "real"-grid edges
Inspired by array-based solutions to integration of PDEs on meshes in Juri Toomre's class
NB: implementation is not robust to missing value points in interior of grid. Hopefully grids have no missing values in coordinate variables, although they may have missing values in non-grid fields (e.g., mask, temperature) */
double *lat_ctr_fk; /* [dgr] Latitude grid with extrapolated boundaries necessary for 9-point template to find four grid corners for each real center */
double *lon_ctr_fk; /* [dgr] Longitude grid with extrapolated boundaries necessary for 9-point template to find four grid corners for each real center */
lat_ctr_fk=(double *)nco_malloc((lat_nbr+2)*(lon_nbr+2)*sizeof(double));
lon_ctr_fk=(double *)nco_malloc((lat_nbr+2)*(lon_nbr+2)*sizeof(double));
long int idx_rl; /* [idx] Index into real unrolled array */
long int idx_fk; /* [idx] Index into fake unrolled array */
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ /* lat idx on real grid */
for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ /* lon idx on real grid */
idx_rl=lat_idx*lon_nbr+lon_idx;
idx_fk=(lat_idx+1)*(lon_nbr+2)+lon_idx+1;
/* Copy real grid to interior of fake grid */
lat_ctr_fk[idx_fk]=lat_ctr[idx_rl];
lon_ctr_fk[idx_fk]=lon_ctr[idx_rl];
} /* !lon */
} /* !lat */
/* Formulae to extrapolate sides and corners of fake grid are written as a starting lat/lon plus or minus adjustment
Adjustment is positive-definite if grid monotonically increases in latitude and longitude from LL to UR
20160111: Use macros/functions to determine longitude adjustments that are always less than 180
This ensures all longitudes contributing to extrapolated longitude are from same branch cut */
/* Bottom row */
lat_idx=0; /* lat idx of extrapolated point on fake grid */
for(lon_idx=1;lon_idx<lon_nbr+1;lon_idx++){ /* lon idx of extrapolated point on fake grid */
idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on bottom row of fake grid */
idx_rl=lat_idx*lon_nbr+lon_idx-1; /* 1D-offset of neighboring point on bottom row of real grid */
lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]-(lat_ctr[idx_rl+lon_nbr]-lat_ctr[idx_rl]);
lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]-nco_lon_dff_brnch_dgr(lon_ctr[idx_rl+lon_nbr],lon_ctr[idx_rl]);
} /* !lon */
/* Top row */
lat_idx=lat_nbr+1; /* lat idx of extrapolated point on fake grid */
for(lon_idx=1;lon_idx<lon_nbr+1;lon_idx++){ /* lon idx of extrapolated point on fake grid */
idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on top row of fake grid */
idx_rl=(lat_nbr-1)*lon_nbr+lon_idx-1; /* 1D-offset of neighboring point on top row of real grid */
lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]+(lat_ctr[idx_rl]-lat_ctr[idx_rl-lon_nbr]);
lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]+nco_lon_dff_brnch_dgr(lon_ctr[idx_rl],lon_ctr[idx_rl-lon_nbr]);
} /* !lon */
/* Left side */
lon_idx=0; /* lon idx of extrapolated point on fake grid */
for(lat_idx=1;lat_idx<lat_nbr+1;lat_idx++){ /* lat idx of extrapolated point on fake grid */
idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on left side of fake grid */
idx_rl=(lat_idx-1)*lon_nbr+lon_idx; /* 1D-offset of neighboring point on left side of real grid */
lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]-(lat_ctr[idx_rl+1]-lat_ctr[idx_rl]);
lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]-nco_lon_dff_brnch_dgr(lon_ctr[idx_rl+1],lon_ctr[idx_rl]);
} /* !lat */
/* Right side */
lon_idx=lon_nbr+1; /* lon idx of extrapolated point on fake grid */
for(lat_idx=1;lat_idx<lat_nbr+1;lat_idx++){ /* lat idx of extrapolated point on fake grid */
idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on right side of fake grid */
idx_rl=(lat_idx-1)*lon_nbr+lon_idx-2; /* 1D-offset of neighboring point on right side of real grid */
lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]+(lat_ctr[idx_rl]-lat_ctr[idx_rl-1]);
lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]+nco_lon_dff_brnch_dgr(lon_ctr[idx_rl],lon_ctr[idx_rl-1]);
} /* !lat */
/* LL */
lat_ctr_fk[0]=lat_ctr_fk[lon_nbr+2]-(lat_ctr_fk[2*(lon_nbr+2)]-lat_ctr_fk[lon_nbr+2]);
lon_ctr_fk[0]=lon_ctr_fk[1]-nco_lon_dff_brnch_dgr(lon_ctr_fk[2],lon_ctr_fk[1]);
/* LR */
lat_ctr_fk[lon_nbr+1]=lat_ctr_fk[2*(lon_nbr+2)-1]-(lat_ctr_fk[3*(lon_nbr+2)-1]-lat_ctr_fk[2*(lon_nbr+2)-1]);
lon_ctr_fk[lon_nbr+1]=lon_ctr_fk[lon_nbr]+nco_lon_dff_brnch_dgr(lon_ctr_fk[lon_nbr],lon_ctr_fk[lon_nbr-1]);
/* UR */
lat_ctr_fk[(lat_nbr+2)*(lon_nbr+2)-1]=lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-1]+(lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-1]-lat_ctr_fk[lat_nbr*(lon_nbr+2)-1]);
lon_ctr_fk[(lat_nbr+2)*(lon_nbr+2)-1]=lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-2]+nco_lon_dff_brnch_dgr(lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-2],lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-3]);
/* UL */
lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)]=lat_ctr_fk[lat_nbr*(lon_nbr+2)]+(lat_ctr_fk[lat_nbr*(lon_nbr+2)]-lat_ctr_fk[(lat_nbr-1)*(lon_nbr+2)]);
lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)]=lon_ctr_fk[lat_nbr*(lon_nbr+2)+1]-nco_lon_dff_brnch_dgr(lon_ctr_fk[lat_nbr*(lon_nbr+2)+2],lon_ctr_fk[lat_nbr*(lon_nbr+2)+1]);
if(nco_dbg_lvl_get() >= nco_dbg_std){
long idx_dbg;
idx_dbg=rgr->idx_dbg;
(void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Fake Center [lat,lon]=[%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,lat_ctr_fk[idx_dbg],lon_ctr_fk[idx_dbg]);
} /* !dbg */
long int lat_idx_fk; /* [idx] Index into fake (extrapolated) latitude array */
long int lon_idx_fk; /* [idx] Index into fake (extrapolated) longitude array */
long int idx_fk_crn_ll_ctr_ll;
long int idx_fk_crn_ll_ctr_lr;
long int idx_fk_crn_ll_ctr_ur;
long int idx_fk_crn_ll_ctr_ul;
long int idx_fk_crn_lr_ctr_ll;
long int idx_fk_crn_lr_ctr_lr;
long int idx_fk_crn_lr_ctr_ur;
long int idx_fk_crn_lr_ctr_ul;
long int idx_fk_crn_ur_ctr_ll;
long int idx_fk_crn_ur_ctr_lr;
long int idx_fk_crn_ur_ctr_ur;
long int idx_fk_crn_ur_ctr_ul;
long int idx_fk_crn_ul_ctr_ll;
long int idx_fk_crn_ul_ctr_lr;
long int idx_fk_crn_ul_ctr_ur;
long int idx_fk_crn_ul_ctr_ul;
double *crn_lat;
double *crn_lon;
crn_lat=(double *)nco_malloc(grd_crn_nbr*sizeof(double));
crn_lon=(double *)nco_malloc(grd_crn_nbr*sizeof(double));
size_t wrn_nbr_max=20;
size_t wrn_nbr=0;
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){
for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){
/* 9-point template valid at all interior (non-edge) points in real grid, and at all points (including edges) in fake grid
Read variables idx_crn_ll_ctr_ul as "index of upper left gridcell center that contributes to lower-left gridcell corner"
Algorithms execute in counter-clockwise (CCW) direction: lower-left, lower-right, upper-right, upper-left
lat_idx and lon_idx are true indices and are used to write into grd_crn_lat/lon arrays
lat_idx_fk and lon_idx_fk are indices into fake arrays with extrapolated boundaries and are used to read data from fake arrays */
lon_idx_fk=lon_idx+1;
lat_idx_fk=lat_idx+1;
idx_rl=lat_idx*lon_nbr+lon_idx;
idx_fk=lat_idx_fk*(lon_nbr+2)+lon_idx_fk;
/* Determine index into fake array (valid everywhere it is applied)
Comments after each equation are formula for real index (valid only at interior gridcells) */
idx_fk_crn_ll_ctr_ll=idx_fk-(lon_nbr+2)-1; // (lat_idx-1)*lon_nbr+lon_idx-1
idx_fk_crn_ll_ctr_lr=idx_fk-(lon_nbr+2); // (lat_idx-1)*lon_nbr+lon_idx
idx_fk_crn_ll_ctr_ur=idx_fk; // lat_idx*lon_nbr+lon_idx
idx_fk_crn_ll_ctr_ul=idx_fk-1; // lat_idx*lon_nbr+lon_idx-1;
idx_fk_crn_lr_ctr_ll=idx_fk-(lon_nbr+2); // (lat_idx-1)*lon_nbr+lon_idx
idx_fk_crn_lr_ctr_lr=idx_fk-(lon_nbr+2)+1; // (lat_idx-1)*lon_nbr+lon_idx+1
idx_fk_crn_lr_ctr_ur=idx_fk+1; // lat_idx*lon_nbr+lon_idx+1
idx_fk_crn_lr_ctr_ul=idx_fk; // lat_idx*lon_nbr+lon_idx;
idx_fk_crn_ur_ctr_ll=idx_fk; // lat_idx*lon_nbr+lon_idx
idx_fk_crn_ur_ctr_lr=idx_fk+1; // lat_idx*lon_nbr+lon_idx+1
idx_fk_crn_ur_ctr_ur=idx_fk+(lon_nbr+2)+1; // (lat_idx+1)*lon_nbr+lon_idx+1
idx_fk_crn_ur_ctr_ul=idx_fk+(lon_nbr+2); // (lat_idx+1)*lon_nbr+lon_idx;
idx_fk_crn_ul_ctr_ll=idx_fk-1; // lat_idx*lon_nbr+lon_idx-1
idx_fk_crn_ul_ctr_lr=idx_fk; // lat_idx*lon_nbr+lon_idx
idx_fk_crn_ul_ctr_ur=idx_fk+(lon_nbr+2); // (lat_idx+1)*lon_nbr+lon_idx
idx_fk_crn_ul_ctr_ul=idx_fk+(lon_nbr+2)-1; // (lat_idx+1)*lon_nbr+lon_idx-1;
/* 20160111: Algorithm requires that all longitudes in template be on same "branch cut"
If, say, LL longitude is 179.0 and LR longitude is -179.0 then their sum and average are zero, not 180.0 or -180.0 as desired
Routines labeled "*_brnch" in the following ensure that branch-cut rules are followed */
idx_crn_ll=grd_crn_nbr*idx_rl+0;
lat_crn[idx_crn_ll]=0.25*(lat_ctr_fk[idx_fk_crn_ll_ctr_ll]+lat_ctr_fk[idx_fk_crn_ll_ctr_lr]+lat_ctr_fk[idx_fk_crn_ll_ctr_ur]+lat_ctr_fk[idx_fk_crn_ll_ctr_ul]);
lon_crn[idx_crn_ll]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ll_ctr_ll],lon_ctr_fk[idx_fk_crn_ll_ctr_lr],lon_ctr_fk[idx_fk_crn_ll_ctr_ur],lon_ctr_fk[idx_fk_crn_ll_ctr_ul]);
idx_crn_lr=grd_crn_nbr*idx_rl+1;
lat_crn[idx_crn_lr]=0.25*(lat_ctr_fk[idx_fk_crn_lr_ctr_ll]+lat_ctr_fk[idx_fk_crn_lr_ctr_lr]+lat_ctr_fk[idx_fk_crn_lr_ctr_ur]+lat_ctr_fk[idx_fk_crn_lr_ctr_ul]);
lon_crn[idx_crn_lr]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_lr_ctr_ll],lon_ctr_fk[idx_fk_crn_lr_ctr_lr],lon_ctr_fk[idx_fk_crn_lr_ctr_ur],lon_ctr_fk[idx_fk_crn_lr_ctr_ul]);
idx_crn_ur=grd_crn_nbr*idx_rl+2;
lat_crn[idx_crn_ur]=0.25*(lat_ctr_fk[idx_fk_crn_ur_ctr_ll]+lat_ctr_fk[idx_fk_crn_ur_ctr_lr]+lat_ctr_fk[idx_fk_crn_ur_ctr_ur]+lat_ctr_fk[idx_fk_crn_ur_ctr_ul]);
lon_crn[idx_crn_ur]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ur_ctr_ll],lon_ctr_fk[idx_fk_crn_ur_ctr_lr],lon_ctr_fk[idx_fk_crn_ur_ctr_ur],lon_ctr_fk[idx_fk_crn_ur_ctr_ul]);
idx_crn_ul=grd_crn_nbr*idx_rl+3;
lat_crn[idx_crn_ul]=0.25*(lat_ctr_fk[idx_fk_crn_ul_ctr_ll]+lat_ctr_fk[idx_fk_crn_ul_ctr_lr]+lat_ctr_fk[idx_fk_crn_ul_ctr_ur]+lat_ctr_fk[idx_fk_crn_ul_ctr_ul]);
lon_crn[idx_crn_ul]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ul_ctr_ll],lon_ctr_fk[idx_fk_crn_ul_ctr_lr],lon_ctr_fk[idx_fk_crn_ul_ctr_ur],lon_ctr_fk[idx_fk_crn_ul_ctr_ul]);
crn_lat[0]=lat_crn[idx_crn_ll];
crn_lat[1]=lat_crn[idx_crn_lr];
crn_lat[2]=lat_crn[idx_crn_ur];
crn_lat[3]=lat_crn[idx_crn_ul];
crn_lon[0]=lon_crn[idx_crn_ll];
crn_lon[1]=lon_crn[idx_crn_lr];
crn_lon[2]=lon_crn[idx_crn_ur];
crn_lon[3]=lon_crn[idx_crn_ul];
/* 20210411: From 2016 until today, nco_ccw_chk() overwrote fourth (UL) with first (LL) corner */
flg_ccw=nco_ccw_chk(crn_lat,crn_lon,grd_crn_nbr,idx_ccw,rcr_lvl);
if(!flg_ccw && wrn_nbr < wrn_nbr_max){
(void)fprintf(stdout,"%s: %s WARNING reports non-CCW gridcell at idx=%li, (lat,lon)_idx=(%li,%li), (lat,lon) = (%g, %g)\n",nco_prg_nm_get(),fnc_nm,idx_rl,lat_idx,lon_idx,lat_ctr[lat_idx],lon_ctr[lon_idx]);
wrn_nbr++;
if(wrn_nbr == wrn_nbr_max) (void)fprintf(stdout,"%s: %s INFO Number of non-CCW errors reached maximum = %li, not printing anymore\n",nco_prg_nm_get(),fnc_nm,wrn_nbr_max);
} /* endif */
lat_crn[idx_crn_ll]=crn_lat[0];
lat_crn[idx_crn_lr]=crn_lat[1];
lat_crn[idx_crn_ur]=crn_lat[2];
lat_crn[idx_crn_ul]=crn_lat[3];
lon_crn[idx_crn_ll]=crn_lon[0];
lon_crn[idx_crn_lr]=crn_lon[1];
lon_crn[idx_crn_ur]=crn_lon[2];
lon_crn[idx_crn_ul]=crn_lon[3];
} /* !lon */
} /* !lat */
if(lat_ctr_fk) lat_ctr_fk=(double *)nco_free(lat_ctr_fk);
if(lon_ctr_fk) lon_ctr_fk=(double *)nco_free(lon_ctr_fk);
if(crn_lon) crn_lon=(double *)nco_free(crn_lon);
if(crn_lat) crn_lat=(double *)nco_free(crn_lat);
} /* !(lat_bnd_id && lon_bnd_id) */
} /* !flg_grd_crv */
if(flg_1D_psd_rct_bnd){
/* Normalize user-supplied pseudo-rectangular (two-value lat/lon) bounds on a 1D grid,
then expand each bound pair into four cell corners (requires grd_crn_nbr == 4) */
double lon_brnch_min; /* [dgr] Lesser of the two longitude bounds of current cell */
double lon_brnch_max; /* [dgr] Greater of the two longitude bounds of current cell */
double lon_dff; /* [dgr] Longitude bound separation, used to detect branch-cut straddles */
assert(grd_crn_nbr == 4);
/* Make boundaries that were provided as pseudo-rectangular branch-cut-compliant */
for(col_idx=0;col_idx<col_nbr;col_idx++){
lon_brnch_min=(lon_bnd[2*col_idx] <= lon_bnd[2*col_idx+1]) ? lon_bnd[2*col_idx] : lon_bnd[2*col_idx+1];
lon_brnch_max=(lon_bnd[2*col_idx] >= lon_bnd[2*col_idx+1]) ? lon_bnd[2*col_idx] : lon_bnd[2*col_idx+1];
lon_dff=lon_brnch_max-lon_brnch_min;
if(lon_dff >= 180.0){
/* Bounds straddle longitude branch-cut: shift greater bound into same 360-degree branch */
if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports 1D pseudo-rectangular bounds branch-cut straddle at col_idx=%ld lon_brnch_max, lon_brnch_min, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,col_idx,lon_brnch_max,lon_brnch_min,lon_dff);
lon_brnch_max-=360.0;
}else if(lon_dff <= -180.0){
/* NOTE(review): lon_dff >= 0.0 by construction (max >= min above), so this branch appears unreachable -- confirm intent */
lon_brnch_max+=360.0;
} /* !lon_dff */
/* Extra condition to convert CW bounds to CCW bounds (necessary for OCO2) */
if(lon_brnch_min <= lon_brnch_max){
lon_bnd[2*col_idx]=lon_brnch_min;
lon_bnd[2*col_idx+1]=lon_brnch_max;
}else{
lon_bnd[2*col_idx]=lon_brnch_max;
lon_bnd[2*col_idx+1]=lon_brnch_min;
} /* end else */
} /* !col_idx */
/* Convert boundaries that were provided as pseudo-rectangular to corners
Corner storage order within each cell is LL, LR, UR, UL */
for(col_idx=0;col_idx<col_nbr;col_idx++){
idx=grd_crn_nbr*col_idx;
/* fxm: OCO2 provides boundaries in CW not CCW orientation */
lon_crn[idx]=lon_bnd[2*col_idx]; /* LL */
lon_crn[idx+1]=lon_bnd[2*col_idx+1]; /* LR */
lon_crn[idx+2]=lon_bnd[2*col_idx+1]; /* UR */
lon_crn[idx+3]=lon_bnd[2*col_idx]; /* UL */
lat_crn[idx]=lat_bnd[2*col_idx]; /* LL */
lat_crn[idx+1]=lat_bnd[2*col_idx]; /* LR */
lat_crn[idx+2]=lat_bnd[2*col_idx+1]; /* UR */
lat_crn[idx+3]=lat_bnd[2*col_idx+1]; /* UL */
/* fxm: OCO2 provides boundaries in CW not CCW orientation */
} /* !col_idx */
} /* flg_1D_psd_rct_bnd */
if(flg_grd_crv || flg_1D_psd_rct_bnd){
/* As of 20160308, use same sanity check for 1D pseudo-rectangular grids as for curvilinear grids
Pseudo-rectangular grids rely on user-produced boundaries that may be psychotic (CW, non-branch-cut)
Starting 20151205, use same sanity check for both inferred and copied curvilinear grids
20151129: Curvilinear extrapolation technique above yields corners outside [-90.0,90.0], [-180.0,360.0]
Also, it may assume input is ascending swath and fail for descending swaths
Complications not fully addressed:
Swaths may (verify this) turn from ascending to descending, or visa-versa, when satellite crosses latitude extrema
Swaths may cross the date-line (and back!) */
/* Determine numeric bounds of input coordinate system */
double lon_min_min;
double lon_max_max;
nco_bool NCO_LON_0_TO_360=True;
if(has_mss_val_ctr){
for(idx=0;idx<grd_sz_nbr;idx++)
if(lon_ctr[idx] != mss_val_ctr_dbl && lon_ctr[idx] < 0.0) break;
}else{
for(idx=0;idx<grd_sz_nbr;idx++)
if(lon_ctr[idx] < 0.0) break;
} /* !has_mss_val_ctr */
if(idx != grd_sz_nbr) NCO_LON_0_TO_360=False;
if(NCO_LON_0_TO_360){
lon_min_min=0.0;
lon_max_max=360.0;
}else{
lon_min_min=-180.0;
lon_max_max=180.0;
} /* !NCO_LON_0_TO_360 */
/* Correct for extrapolation outside boundaries */
for(idx=0;idx<grd_sz_nbr*grd_crn_nbr;idx++){
idx_ctr=idx/grd_crn_nbr;
if(has_mss_val_ctr)
if(lat_ctr[idx_ctr] == mss_val_ctr_dbl)
continue;
if(lat_crn[idx] < -90.0 || lat_crn[idx] > 90.0 || lon_crn[idx] < lon_min_min || lon_crn[idx] > lon_max_max){
idx_crn_ll=grd_crn_nbr*idx_ctr+0;
idx_crn_lr=grd_crn_nbr*idx_ctr+1;
idx_crn_ur=grd_crn_nbr*idx_ctr+2;
idx_crn_ul=grd_crn_nbr*idx_ctr+3;
if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s reports %s corner outside canonical bounds at idx = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,(lat_bnd_id == NC_MIN_INT) ? "inferred" : "copied",idx_ctr,lat_ctr[idx_ctr],lon_ctr[idx_ctr],lat_crn[idx_crn_ll],lon_crn[idx_crn_ll],lat_crn[idx_crn_lr],lon_crn[idx_crn_lr],lat_crn[idx_crn_ur],lon_crn[idx_crn_ur],lat_crn[idx_crn_ul],lon_crn[idx_crn_ul]);
/* Restrict grid to real latitudes and to the 360-degree range detected from input cell-centers */
if(lat_crn[idx] < -90.0) lat_crn[idx]=-90.0;
if(lat_crn[idx] > 90.0) lat_crn[idx]=90.0;
if(lon_crn[idx] < lon_min_min) lon_crn[idx]+=360.0;
if(lon_crn[idx] > lon_max_max) lon_crn[idx]-=360.0;
} /* !sanity */
} /* !idx */
/* Vertices (for valid points) are now within 360 degrees (either [0,360] or [-180,180]) implied by input coordinate system
Curvilinear inferred grid are, by construction, branch-cut compliant
fxm: Curvilinear and 1D pseudo-rectangular grids prescribed by (i.e., read-in from) input may not be branch-cut compliant */
if(nco_dbg_lvl_get() >= nco_dbg_std){
long idx_dbg;
idx_dbg=rgr->idx_dbg;
idx_crn_ll=grd_crn_nbr*idx_dbg+0;
idx_crn_lr=grd_crn_nbr*idx_dbg+1;
idx_crn_ur=grd_crn_nbr*idx_dbg+2;
idx_crn_ul=grd_crn_nbr*idx_dbg+3;
(void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,lat_ctr[idx_dbg],lon_ctr[idx_dbg],lat_crn[idx_crn_ll],lon_crn[idx_crn_ll],lat_crn[idx_crn_lr],lon_crn[idx_crn_lr],lat_crn[idx_crn_ur],lon_crn[idx_crn_ur],lat_crn[idx_crn_ul],lon_crn[idx_crn_ul]);
} /* !dbg */
} /* !flg_grd_crv || flg_1D_psd_rct_bnd */
if(flg_grd_crv){
/* Curvilinear grids: centers and corners already have unrolled (grd_sz_nbr) layout,
so they map element-for-element into the output arrays */
/* Copy centers into empty output array */
for(idx=0;idx<grd_sz_nbr;idx++){
grd_ctr_lat[idx]=lat_ctr[idx];
grd_ctr_lon[idx]=lon_ctr[idx];
} /* !idx */
/* Copy inferred or copied (from input) sanity-checked corners into empty output array */
for(idx=0;idx<grd_sz_nbr*grd_crn_nbr;idx++){
grd_crn_lat[idx]=lat_crn[idx];
grd_crn_lon[idx]=lon_crn[idx];
} /* !idx */
} /* !flg_grd_crv */
/* 20150512 Many 2D datasets have bad bounds
Primary example is Gaussian grids archived by CESM models that use midpoint rule rather than iterate to compute interfaces from quadrature points
Such files have correct gw arrays and incorrect cell bounds
flg_dgn_bnd allows nco_grd_nfr() to override faulty boundaries in file with correct bounds */
const nco_bool flg_dgn_bnd=rgr->flg_dgn_bnd; /* [flg] Diagnose rather than copy inferred bounds */
const long lat_nbr_hlf=lat_nbr/2L; // [nbr] Half number of latitudes (e.g., lat_nbr_hlf=32 for lat_nbr=64 and 65)
if(flg_grd_2D){
if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){
if(nco_dbg_lvl_get() >= nco_dbg_std && flg_dgn_bnd) (void)fprintf(stdout,"%s: INFO %s will diagnose cell boundaries from cell centers...\n",nco_prg_nm_get(),fnc_nm);
/* Derive interfaces (ntf) and bounds (bnd) from midpoints approximation applied to center data
NB: Simplistically derived interfaces (ntf) only valid on some rectangular grids (not on Gaussian grids)
These inferred-from-midpoint interfaces/bounds are overwritten in next block once lat grid is known */
if(flg_s2n) lat_ntf[0L]=lat_ctr[0L]-0.5*(lat_ctr[1L]-lat_ctr[0L]); else lat_ntf[0L]=lat_ctr[0L]+0.5*(lat_ctr[0L]-lat_ctr[1L]);
if(lat_ntf[0L] < -90.0) lat_ntf[0L]=-90.0; /* NB: lat_ntf[0] can be same as lat_ctr[0] for cap grid */
if(lat_ntf[0L] > 90.0) lat_ntf[0L]=90.0;
for(lat_idx=0L;lat_idx<lat_nbr-1L;lat_idx++)
lat_ntf[lat_idx+1L]=0.5*(lat_ctr[lat_idx]+lat_ctr[lat_idx+1L]);
if(flg_s2n) lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1L]+0.5*(lat_ctr[lat_nbr-1L]-lat_ctr[lat_nbr-2L]); else lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1L]-0.5*(lat_ctr[lat_nbr-2L]-lat_ctr[lat_nbr-1L]);
if(lat_ntf[lat_nbr] > 90.0) lat_ntf[lat_nbr]=90.0; /* NB: lat_ntf[lat_nbr] can be same as lat_ctr[lat_nbr-1] for cap grid */
if(lat_ntf[lat_nbr] < -90.0) lat_ntf[lat_nbr]=-90.0; /* NB: lat_ntf[lat_nbr] can be same as lat_ctr[lat_nbr-1] for cap grid */
if(flg_s2n) lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); /* fabs() ensures positive-definite span for N->S grids */
lon_ntf[0L]=lon_ctr[0L]-0.5*(lon_ctr[1L]-lon_ctr[0L]);
for(lon_idx=0;lon_idx<lon_nbr-1L;lon_idx++)
lon_ntf[lon_idx+1L]=0.5*(lon_ctr[lon_idx]+lon_ctr[lon_idx+1L]);
lon_ntf[lon_nbr]=lon_ctr[lon_nbr-1L]+0.5*(lon_ctr[lon_nbr-1L]-lon_ctr[lon_nbr-2L]);
lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L];
for(idx=0;idx<lon_nbr;idx++){
lon_bnd[2L*idx]=lon_ntf[idx];
lon_bnd[2L*idx+1L]=lon_ntf[idx+1L];
} /* !idx */
for(idx=0;idx<lat_nbr;idx++){
lat_bnd[2L*idx]=lat_ntf[idx];
lat_bnd[2L*idx+1L]=lat_ntf[idx+1L];
} /* !idx */
}else{ /* !(lat_bnd_id && lon_bnd_id) */
/* Derive interfaces (ntf) from bounds (bnd) data on disk */
for(idx=0;idx<lon_nbr;idx++) lon_ntf[idx]=lon_bnd[2L*idx];
lon_ntf[lon_nbr]=lon_bnd[2L*lon_nbr-1L];
for(idx=0;idx<lat_nbr;idx++) lat_ntf[idx]=lat_bnd[2L*idx];
lat_ntf[lat_nbr]=lat_bnd[2L*lat_nbr-1L];
lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); /* fabs() ensures positive-definite span for N->S grids */
lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L];
} /* !(lat_bnd_id && lon_bnd_id) */
} /* !flg_grd_2D */
if(flg_grd_2D){
/* Diagnose type of two-dimensional input grid by testing second latitude center against formulae */
double lat_ctr_tst_eqa;
double lat_ctr_tst_fv;
if(flg_s2n) lat_ctr_tst_eqa=lat_ntf[0L]+lat_spn*1.5/lat_nbr; else lat_ctr_tst_eqa=lat_ntf[0L]-lat_spn*1.5/lat_nbr;
if(flg_s2n) lat_ctr_tst_fv=lat_ntf[0L]+lat_spn/(lat_nbr-1L); else lat_ctr_tst_fv=lat_ntf[0L]-lat_spn/(lat_nbr-1L);
double lat_ctr_tst_gss;
/* In diagnosing grids, agreement with input to single-precision is "good enough for government work"
Hence some comparisons cast from double to float before comparison
20150526: T42 grid from SCRIP and related maps are only accurate to ~eight digits
20150611: map_ne120np4_to_fv801x1600_bilin.150418.nc has yc_b[1600]=-89.775000006 not expected exact value lat_ctr[1]=-89.775000000000006
20170521: T62 grid from NCEP-NCAR Reanalysis 1 worse than single precision, has yc_[192]=-86.6531 not expected exact value lat_ctr[1]=-86.6531671712612, relative difference is 7.86021e-07
20191008: T62 grid from NCEP-NCAR Reanalysis 2 worse than single precision, has yc_[92]=-86.6531 not expected exact value lat_ctr[1]=-86.6531671712612, relative difference is 7.86021e-07 */
if(nco_dbg_lvl_get() >= nco_dbg_scl && !flg_s2n) (void)fprintf(stderr,"%s: INFO %s reports that grid inferral has detected a 2D grid that runs from north-to-south, not south-to-north. Support for creating/inferring 2D N-to-S grids was added in NCO 4.7.7 (September, 2018) and should work fine.\nHINT: If present command fails, report problem to developers and then re-try inferring grid after reversing input dataset's latitude coordinate (with, e.g., ncpdq -a time,-lat,lon in.nc out.nc)\n",nco_prg_nm_get(),fnc_nm);
if((float)lat_ctr[1L] == (float)lat_ctr_tst_eqa) lat_typ=nco_grd_lat_eqa;
if((float)lat_ctr[1L] == (float)lat_ctr_tst_fv) lat_typ=nco_grd_lat_fv;
double *lat_sin=NULL_CEWI; // [frc] Sine of Gaussian latitudes double precision
double *wgt_Gss=NULL; // [frc] Gaussian weights double precision
if(lat_typ == nco_grd_lat_nil){
/* Check for Gaussian grid */
lat_sin=(double *)nco_malloc(lat_nbr*sizeof(double));
wgt_Gss=(double *)nco_malloc(lat_nbr*sizeof(double));
(void)nco_lat_wgt_gss(lat_nbr,flg_s2n,lat_sin,wgt_Gss);
lat_ctr_tst_gss=rdn2dgr*asin(lat_sin[1L]);
/* Gaussian weights on output grid will be double-precision accurate
Grid itself is kept as user-specified so area diagnosed by ESMF_RegridWeightGen may be slightly inconsistent with weights */
const double eps_rlt_cnv_gss=1.0e-6; // Convergence criterion (1.0e-7 fails for NCEP NCAR Reanalysis 1!)
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG %s reports lat_ctr[1]=%g, lat_ctr_tst_gss=%g, fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss))=%g\n",nco_prg_nm_get(),fnc_nm,lat_ctr[1],lat_ctr_tst_gss,fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss)));
if(fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss)) < eps_rlt_cnv_gss) lat_typ=nco_grd_lat_gss;
} /* !Gaussian */
if(lat_typ == nco_grd_lat_nil){
/* If still of unknown type, this 2D grid may be weird
This occurs, e.g., with POP3 destination grid
Change gridtype from nil (which means not-yet-set) to unknown (which means none of the others matched) */
lat_typ=nco_grd_lat_unk;
} /* !nil */
/* Currently grd_lat_typ and grd_2D_typ are equivalent, though that may be relaxed in future */
if(lat_typ == nco_grd_lat_unk) grd_typ=nco_grd_2D_unk;
else if(lat_typ == nco_grd_lat_gss) grd_typ=nco_grd_2D_gss;
else if(lat_typ == nco_grd_lat_fv) grd_typ=nco_grd_2D_fv;
else if(lat_typ == nco_grd_lat_eqa) grd_typ=nco_grd_2D_eqa;
else assert(False);
/* Diagnose latitude interfaces from gridcell centers (if boundaries not provided) */
if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){
//if(flg_s2n) lat_nrt=lat_ntf[lat_nbr]; else lat_nrt=lat_ntf[0L];
lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]);
switch(lat_typ){
case nco_grd_lat_fv:
lat_ncr=lat_spn/(lat_nbr-1L);
if(flg_s2n) lat_ntf[1L]=lat_ntf[0L]+0.5*lat_ncr; else lat_ntf[1L]=lat_ntf[0L]-0.5*lat_ncr;
for(lat_idx=2;lat_idx<lat_nbr;lat_idx++)
if(flg_s2n) lat_ntf[lat_idx]=lat_ntf[1L]+(lat_idx-1L)*lat_ncr; else lat_ntf[lat_idx]=lat_ntf[1L]-(lat_idx-1L)*lat_ncr;
break;
case nco_grd_lat_eqa:
lat_ncr=lat_spn/lat_nbr;
for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++)
if(flg_s2n) lat_ntf[lat_idx]=lat_ntf[0L]+lat_idx*lat_ncr; else lat_ntf[lat_idx]=lat_ntf[0L]-lat_idx*lat_ncr;
break;
case nco_grd_lat_gss:
for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++)
lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]);
/* First guess for lat_ntf is midway between Gaussian abscissae */
for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++)
lat_ntf[lat_idx]=0.5*(lat_ctr[lat_idx-1L]+lat_ctr[lat_idx]);
/* Iterate guess until area between interfaces matches Gaussian weight */
for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++){
double fofx_at_x0; /* [frc] Function to iterate evaluated at current guess */
double dfdx_at_x0; /* [frc] Derivative of equation evaluated at current guess */
// 20190531: Wuyin Lin reports this convergence criterion fails on ECMWF F640 grid
// Probably because latitude iterations assume s2n grid and ECMWF is n2s
// Possibly also because latitude coordinates are stored in single precision
// Implement precision-dependent convergence criterion, e.g., 1.0e-15 and 1.0e-7 for double- and single-precision, respectively?
const double eps_rlt_cnv=1.0e-15; // Convergence criterion (1.0e-16 pushes double precision to the brink)
itr_cnt=0;
lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L]));
fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss;
while(fabs(fofx_at_x0) > eps_rlt_cnv){
/* Newton-Raphson iteration:
Let x=lat_ntf[lat_idx], y0=lat_ntf[lat_idx-1], gw = Gaussian weight (exact solution)
f(x)=sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 # s2n grid
f(x)=sin(dgr2rdn*y0)-sin(dgr2rdn*x)-gw=0 # n2s grid
dfdx(x)= dgr2rdn*cos(dgr2rdn*x) # s2n grid
dfdx(x)=-dgr2rdn*cos(dgr2rdn*x) # n2s grid
x_better=x0-f(x0)/f'(x0) */
dfdx_at_x0=dgr2rdn*cos(dgr2rdn*lat_ntf[lat_idx]);
if(!flg_s2n) dfdx_at_x0=-dfdx_at_x0;
lat_ntf[lat_idx]+=fofx_at_x0/dfdx_at_x0; /* NB: not sure why this is minus not plus but it works :) */
lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L]));
fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss;
if(++itr_cnt > itr_nbr_max){
(void)fprintf(stdout,"%s: ERROR %s reports convergence only %g after %d iterations for lat_idx = %ld\n",nco_prg_nm_get(),fnc_nm,fabs(fofx_at_x0),itr_nbr_max,lat_idx);
nco_exit(EXIT_FAILURE);
} /* endif */
} /* !while */
} /* !lat_idx */
/* Use Gaussian grid symmetry to obtain same interfaces in both hemispheres (avoids cumulative rounding errors) */
if(lat_nbr%2){
/* lat_nbr is odd */
for(lat_idx=1L;lat_idx<=lat_nbr_hlf+1L;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx+1L];
}else{
/* lat_nbr is even */
for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx];
} /* !flg_lat_evn */
if(lat_sin) lat_sin=(double *)nco_free(lat_sin);
break;
case nco_grd_lat_unk:
/* No generic formula exists so use interfaces already read or diagnosed as midpoints between centers */
break;
default:
nco_dfl_case_generic_err(); break;
} /* !lat_typ */
if(lat_typ == nco_grd_lat_gss){
/* 20170510: First approximation above to exterior interfaces for Gaussian grid are ~ +/-89 degrees
Loops below recompute interior interfaces only
Southern- and northern-most interfaces must be explicitly assigned
Inferral test for Gaussian grid _assumes_ global grid
Hence WLOG can assign [-90.0, 90.0] to Gaussian grid exterior boundaries */
if(flg_s2n) lat_ntf[0L]=-90.0; else lat_ntf[0L]=90.0;
if(flg_s2n) lat_ntf[lat_nbr]=90.0; else lat_ntf[lat_nbr]=-90.0;
} /* !nco_grd_lat_gss */
/* Now that final latitude interfaces are known for all grid-types, assign to boundaries, overwriting provisional values stored there earlier */
for(idx=0;idx<lat_nbr;idx++){
lat_bnd[2L*idx]=lat_ntf[idx];
lat_bnd[2L*idx+1L]=lat_ntf[idx+1L];
} /* !idx */
} /* !(lat_bnd_id && lon_bnd_id) */
/* Use centers and boundaries to diagnose latitude weights */
switch(lat_typ){
case nco_grd_lat_eqa:
case nco_grd_lat_fv:
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx]));
break;
case nco_grd_lat_gss:
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=wgt_Gss[lat_idx];
break;
case nco_grd_lat_unk:
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx]));
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING %s reports unknown input latitude grid-type. Guessing that weights for grid of rectangles is OK.\n",nco_prg_nm_get(),fnc_nm);
break;
default:
nco_dfl_case_generic_err(); break;
} /* !lat_typ */
/* Diagnose type of longitude grid by testing second longitude center against formulae */
lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L];
lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]);
if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn;
if(lon_typ == nco_grd_lon_nil){
if( (float)lon_ctr[0L] == 0.0f && (float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_Grn_ctr;
else if((float)lon_ctr[0L] == -180.0f && (float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_180_ctr;
else if((float)lon_ntf[0L] == 0.0f && (float)lon_ntf[1L] == (float)(lon_ntf[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_Grn_wst;
else if((float)lon_ntf[0L] == -180.0f && (float)lon_ntf[1L] == (float)(lon_ntf[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_180_wst;
else if((float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_bb;
else lon_typ=nco_grd_lon_unk;
} /* !lon_typ */
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input 2D grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_2D_sng(grd_typ));
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input latitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lat_sng(lat_typ));
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input longitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lon_sng(lon_typ));
} /* !flg_grd_2D */
if(flg_grd_2D){
if(nco_dbg_lvl_get() >= nco_dbg_crr){
for(idx=0;idx<lat_nbr;idx++){
(void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr[idx]);
for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++)
(void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? "]\n" : ", ");
} /* end loop over lat */
for(idx=0;idx<lon_nbr;idx++){
(void)fprintf(stdout,"lon[%li] = %g, vertices = ",idx,lon_ctr[idx]);
for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++)
(void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lon_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? "]\n" : ", ");
} /* end loop over lon */
} /* endif dbg */
/* Fuzzy test of latitude weight normalization */
//const double eps_rlt_max=1.0e-14; /* [frc] Round-off error tolerance: Used 1.0e-14 until 20180904 */
const double eps_rlt_max=1.0e-12; /* [frc] Round-off error tolerance: Used 1.0e-12 since 20180904 */
lat_wgt_ttl=0.0;
for(idx=0;idx<lat_nbr;idx++) lat_wgt_ttl+=lat_wgt[idx];
if(grd_typ == nco_grd_2D_fv || grd_typ == nco_grd_2D_eqa){
double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */
lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd[2*(lat_nbr-1)+1L])-sin(dgr2rdn*lat_bnd[0L]));
if(grd_typ != nco_grd_2D_unk && fabs(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc) > eps_rlt_max){
(void)fprintf(stdout,"%s: ERROR %s reports grid normalization does not meet precision tolerance eps_rlt_max = %20.15f\nlat_wgt_ttl = %20.15f, lat_wgt_ttl_xpc = %20.15f, lat_wgt_frc = %20.15f, eps_rlt = %20.15f\n",nco_prg_nm_get(),fnc_nm,eps_rlt_max,lat_wgt_ttl,lat_wgt_ttl_xpc,lat_wgt_ttl/lat_wgt_ttl_xpc,1.0-lat_wgt_ttl/lat_wgt_ttl_xpc);
nco_exit(EXIT_FAILURE);
} /* !imprecise */
} /* !nco_grd_lat_eqa, !nco_grd_lat_fv */
} /* !flg_grd_2D */
if(flg_grd_2D){
/* Fill per-row (lat) and per-column (lon) corner arrays for rectangular 2D grids
Source is interfaces (ntf) when bounds were diagnosed, else the bounds (bnd) read from input
Corner storage order within each cell is LL, LR, UR, UL */
assert(grd_crn_nbr == 4);
if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){
/* If interfaces were diagnosed from centers, copy corners from interfaces */
for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){
idx=grd_crn_nbr*lon_idx;
lon_crn[idx]=lon_ntf[lon_idx]; /* LL */
lon_crn[idx+1L]=lon_ntf[lon_idx+1L]; /* LR */
lon_crn[idx+2L]=lon_ntf[lon_idx+1L]; /* UR */
lon_crn[idx+3L]=lon_ntf[lon_idx]; /* UL */
} /* !lon_idx */
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){
idx=grd_crn_nbr*lat_idx;
lat_crn[idx]=lat_ntf[lat_idx]; /* LL */
lat_crn[idx+1L]=lat_ntf[lat_idx]; /* LR */
lat_crn[idx+2L]=lat_ntf[lat_idx+1L]; /* UR */
lat_crn[idx+3L]=lat_ntf[lat_idx+1L]; /* UL */
} /* !lat_idx */
}else{ /* !lat_bnd_id */
/* If boundaries were provided in input dataset, copy corners from boundaries */
for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){
idx=grd_crn_nbr*lon_idx;
lon_crn[idx]=lon_bnd[2*lon_idx]; /* LL */
lon_crn[idx+1L]=lon_bnd[2*lon_idx+1L]; /* LR */
lon_crn[idx+2L]=lon_bnd[2*lon_idx+1L]; /* UR */
lon_crn[idx+3L]=lon_bnd[2*lon_idx]; /* UL */
} /* !lon_idx */
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){
idx=grd_crn_nbr*lat_idx;
lat_crn[idx]=lat_bnd[2*lat_idx]; /* LL */
lat_crn[idx+1L]=lat_bnd[2*lat_idx]; /* LR */
lat_crn[idx+2L]=lat_bnd[2*lat_idx+1L]; /* UR */
lat_crn[idx+3L]=lat_bnd[2*lat_idx+1L]; /* UL */
} /* !lat_idx */
} /* !lat_bnd_id */
} /* !flg_grd_2D */
/* lat/lon_crn will not change anymore so stuff rectangular arrays into unrolled arrays */
if(flg_grd_1D){
/* 1D (unstructured) grids: centers copy 1:1; corners copy 1:1 when available,
else are zero-filled so downstream weight generators still have a corner array */
for(idx=0;idx<grd_sz_nbr;idx++){
grd_ctr_lat[idx]=lat_ctr[idx];
grd_ctr_lon[idx]=lon_ctr[idx];
if(flg_wrt_crn){
for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){
idx2=grd_crn_nbr*idx+crn_idx;
grd_crn_lat[idx2]=lat_crn[idx2];
grd_crn_lon[idx2]=lon_crn[idx2];
} /* !crn */
}else{ /* !flg_wrt_crn */
/* Defaults for ERWG when corners are unknown */
for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){
idx2=grd_crn_nbr*idx+crn_idx;
grd_crn_lat[idx2]=0.0;
grd_crn_lon[idx2]=0.0;
} /* !crn */
} /* !flg_wrt_crn */
} /* !col */
} /* !flg_grd_1D */
if(flg_grd_2D){
/* Unroll rectangular per-row/per-column centers and corners into full
grd_sz_nbr(=lat_nbr*lon_nbr)-length output arrays, row-major (lat outer, lon inner) */
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){
for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){
idx=lat_idx*lon_nbr+lon_idx;
grd_ctr_lat[idx]=lat_ctr[lat_idx];
grd_ctr_lon[idx]=lon_ctr[lon_idx];
for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){
idx2=grd_crn_nbr*idx+crn_idx;
lat_idx2=lat_idx*grd_crn_nbr+crn_idx;
lon_idx2=lon_idx*grd_crn_nbr+crn_idx;
grd_crn_lat[idx2]=lat_crn[lat_idx2];
grd_crn_lon[idx2]=lon_crn[lon_idx2];
} /* !crn */
} /* !lon */
} /* !lat */
/* 20190613: Convert CW quadrilaterals to CCW quadrilaterals so TempestRemap accepts grids
Default construction/inferral method orders corners CCW and CW for s2n and n2s grids, respectively */
if(!flg_s2n){
for(idx=0L;idx<grd_sz_nbr;idx++){
idx2=grd_crn_nbr*idx;
/* NOTE(review): return flag flg_ccw is not checked here; corner re-ordering is
presumably performed in place through the passed pointers -- confirm */
flg_ccw=nco_ccw_chk(grd_crn_lat+idx2,grd_crn_lon+idx2,grd_crn_nbr,idx_ccw,rcr_lvl);
} /* !idx */
} /* !flg_s2n */
} /* !flg_grd_2D */
/* Find span of all grids */
double lat_max; /* [dgr] Maximum latitude */
double lat_min; /* [dgr] Minimum latitude */
double lon_max; /* [dgr] Maximum longitude */
double lon_min; /* [dgr] Minimum longitude */
idx_ctr=0;
if(has_mss_val_ctr){
/* Find first non-missing value center and thus corners */
for(idx_ctr=0;idx_ctr<grd_sz_nbr;idx_ctr++){
if(grd_ctr_lat[idx_ctr] != mss_val_ctr_dbl) break;
} /* !grd_sz_nbr */
assert(idx_ctr != grd_sz_nbr);
} /* !has_mss_val_ctr */
if(flg_wrt_crn){
/* Grids with corner boundaries supplied or inferred */
lon_max=grd_crn_lon[idx_ctr*grd_crn_nbr];
lat_max=grd_crn_lat[idx_ctr*grd_crn_nbr];
lon_min=grd_crn_lon[idx_ctr*grd_crn_nbr];
lat_min=grd_crn_lat[idx_ctr*grd_crn_nbr];
for(idx=1;idx<grd_sz_nbr*grd_crn_nbr;idx++){
idx_ctr=idx/grd_crn_nbr;
if(has_mss_val_ctr)
if(grd_ctr_lat[idx_ctr] == mss_val_ctr_dbl)
continue;
lat_max=(grd_crn_lat[idx] > lat_max) ? grd_crn_lat[idx] : lat_max;
lon_max=(grd_crn_lon[idx] > lon_max) ? grd_crn_lon[idx] : lon_max;
lat_min=(grd_crn_lat[idx] < lat_min) ? grd_crn_lat[idx] : lat_min;
lon_min=(grd_crn_lon[idx] < lon_min) ? grd_crn_lon[idx] : lon_min;
} /* !idx */
}else{ /* !flg_wrt_crn */
/* 20170424: Diagnose grid-extent when corners were not provided or inferred
This is usually (always?) for 1d unstructured grids with only centers provided */
lon_max=grd_ctr_lon[idx_ctr];
lat_max=grd_ctr_lat[idx_ctr];
lon_min=grd_ctr_lon[idx_ctr];
lat_min=grd_ctr_lat[idx_ctr];
for(idx_ctr=1;idx_ctr<grd_sz_nbr;idx_ctr++){
if(has_mss_val_ctr)
if(grd_ctr_lat[idx_ctr] == mss_val_ctr_dbl)
continue;
lat_max=(grd_ctr_lat[idx_ctr] > lat_max) ? grd_ctr_lat[idx_ctr] : lat_max;
lon_max=(grd_ctr_lon[idx_ctr] > lon_max) ? grd_ctr_lon[idx_ctr] : lon_max;
lat_min=(grd_ctr_lat[idx_ctr] < lat_min) ? grd_ctr_lat[idx_ctr] : lat_min;
lon_min=(grd_ctr_lon[idx_ctr] < lon_min) ? grd_ctr_lon[idx_ctr] : lon_min;
} /* !idx_ctr */
} /* flg_wrt_crn */
lat_spn=lat_max-lat_min;
lon_spn=lon_max-lon_min;
/* Use strict rules for rectangular grids, looser for spans that are inferred, or center-to-center not corner-to-corner */
if(flg_grd_2D){
if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn;
}else{ /* !flg_grd_2D */
if((float)lon_spn >= 340.0f && (float)lat_spn >= 170.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn;
} /* flg_wrt_crn */
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports grid resolution %li x %li, spans %g x %g degrees: [%g <= lat <= %g], [%g <= lon <= %g]\n",nco_prg_nm_get(),fnc_nm,lat_nbr,lon_nbr,lat_spn,lon_spn,lat_min,lat_max,lon_min,lon_max);
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input grid-extent: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_xtn_sng(nco_grd_xtn));
/* Write ERWG hints if filenames provided and grid is regional */
char *fl_hnt=NULL; /* [sng] Hint filename actually opened (src takes precedence when both set) */
char *fl_hnt_dst=NULL; /* [sng] Destination-grid hint filename */
char *fl_hnt_src=NULL; /* [sng] Source-grid hint filename */
if(rgr->fl_hnt_dst) fl_hnt=fl_hnt_dst=rgr->fl_hnt_dst;
if(rgr->fl_hnt_src) fl_hnt=fl_hnt_src=rgr->fl_hnt_src;
if(nco_grd_xtn == nco_grd_xtn_rgn && fl_hnt){
const char *fl_mode="w";
FILE *fp_hnt; /* [fl] Hint file (for ERWG switches) file handle */
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s writing ERWG weight-generation regional hint to file %s\n",nco_prg_nm_get(),fnc_nm,fl_hnt);
/* Open output file */
if((fp_hnt=fopen(fl_hnt,fl_mode)) == NULL){
(void)fprintf(stderr,"%s: ERROR unable to open hint output file %s\n",nco_prg_nm_get(),fl_hnt);
nco_exit(EXIT_FAILURE);
} /* end if */
if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: Opened hint file %s\n",nco_prg_nm_get(),fl_hnt);
/* NOTE(review): when both hint filenames are supplied, only one file (fl_hnt, the src name)
is opened yet both switches are written back-to-back with no separator -- confirm intended */
if(fl_hnt_src) (void)fprintf(fp_hnt,"--src_regional");
if(fl_hnt_dst) (void)fprintf(fp_hnt,"--dst_regional");
rcd=fclose(fp_hnt);
if(rcd != 0){
(void)fprintf(stderr,"%s: ERROR unable to close hint output file %s\n",nco_prg_nm_get(),fl_hnt);
nco_exit(EXIT_FAILURE);
} /* end if */
if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: Closed hint file %s\n",nco_prg_nm_get(),fl_hnt);
} /* !nco_grd_xtn */
/* Diagnose area if necessary
20170510: ALM/CLM "area" is _FillValue=1.0e36f over ocean and total gridcell area in km2 (not multiplied by landfrac) elsewhere
Writing this ALM/CLM "area" variable to gridfile, then using with ERWG --user_areas could be disastrous (depending on mask array and interpolation type)
On the other hand CAM "area" variable is exactly what we want for gridfile
Input areas are considered "untrustworthy" iff they have _and use_ missing value attribute
Re-diagnose areas considered untrustworthy so output area array does not contain missing values */
if(flg_wrt_crn && has_mss_val_area){
const double mss_val_dbl=mss_val_area_dbl;
/* Linear scan: stop at first cell whose area equals the missing value */
for(idx=0;idx<grd_sz_nbr;idx++)
if(area[idx] == mss_val_dbl) break;
/* Early loop exit (idx < grd_sz_nbr) means at least one missing area value is in use */
if(idx < grd_sz_nbr) use_mss_val_area=True;
if(nco_dbg_lvl_get() >= nco_dbg_fl && use_mss_val_area) (void)fprintf(stdout,"%s: INFO %s reports input area field %s is considered untrustworthy because it uses missing values, will diagnose area from cell boundaries instead...\n",nco_prg_nm_get(),fnc_nm,area_nm_in);
} /* !has_mss_val_area */
/* 20170511: There remain a handful of cases when input area should be diagnosed not copied
These include using ncremap in SGS mode when inferred grids must use sensible area units
Otherwise an inferred grid with area [km2] from ALM/CLM might be combined with area [sr] from NCO
This would bias ERWG --user_areas produced values by ~10^10
Setting flg_dgn_area ensures inferred area uses [sr] */
const nco_bool flg_dgn_area=rgr->flg_dgn_area; /* [flg] Diagnose rather than copy inferred area */
if(flg_wrt_crn && /* If bounds are available to compute area and ... */
(area_id == NC_MIN_INT || /* Area is not in input file ... */
use_mss_val_area || /* Area is untrustworthy */
flg_dgn_area)){ /* User/application explicitly requests diagnostic area */
/* Not absolutely necessary to diagnose area because ERWG will diagnose and output area itself _unless_ --user_areas option is given */
if(nco_dbg_lvl_get() >= nco_dbg_std && flg_dgn_area) (void)fprintf(stdout,"%s: INFO %s reports diagnosing area from cell boundaries...\n",nco_prg_nm_get(),fnc_nm);
if(flg_grd_crv || flg_grd_1D){
/* Area of arbitrary unstructured or curvilinear grids requires spherical trigonometry */
nco_sph_plg_area(rgr,grd_crn_lat,grd_crn_lon,grd_sz_nbr,grd_crn_nbr,area);
}else if(flg_grd_2D){
/* Rectangular cell area on unit sphere: delta-longitude (radians) times delta-sin(latitude) */
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++)
for(lon_idx=0;lon_idx<lon_nbr;lon_idx++)
area[lat_idx*lon_nbr+lon_idx]=fabs(dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*(sin(dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(dgr2rdn*lat_bnd[2*lat_idx]))); /* fabs() ensures positive area in n2s grids */
} /* !flg_grd_2D */
} /* !area_id */
/* ERWG will fail unless grid file has mask variable
Use nul-mask (all points included) whenever input mask variable not supplied/detected
Define nul-mask true everywhere and overwrite with false below
Input mask can be any type and output mask will always be NC_INT */
for(idx=0;idx<grd_sz_nbr;idx++) msk[idx]=1;
if(msk_id != NC_MIN_INT){
/* Change missing-value-masked points to 0 integer mask for SCRIP grids (SCRIP has no missing value convention)
Input mask can be any type and output mask will always be NC_INT
Applications:
ALM/CLM mask (landmask) is NC_FLOAT and defines though does not use NC_FLOAT missing value
CICE mask (tmask/umask) is NC_FLOAT and defines and uses NC_FLOAT missing value
RACMO mask is NC_FLOAT and defines though does not use NC_FLOAT missing value
AMSR mask is NC_SHORT and has no missing value
GHRSST mask is NC_BYTE and is a multi-valued surface-type flag with missing value == -1b */
if(msk_typ != NC_INT){
if(nco_dbg_lvl_get() == nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s mask variable \"%s\" has odd type = %s. Re-run with higher debugging level for more information.\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,nco_typ_sng(msk_typ));
if(nco_dbg_lvl_get() > nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s mask variable \"%s\" has odd type = %s. Regridding weight generators require a mask variable of type NC_INT to specify points to include/exclude as sources/destinations. Points where the mask variable is zero or the missing value will be excluded (ignored) in regridding, all other points will be included. When inferring gridfiles, NCO assumes the first variable with a \"mask\"-like name (\"mask\", \"Mask\", \"grid_imask\", \"landmask\", or \"tmask\"), or the variable designated by the \"--msk_[src/dst]=msk_nm\" option, is this mask. However the variable \"%s\" in this file is not type NC_INT and so may not be intended as a regridding mask, hence this oh so pleasant informational WARNING. To prevent NCO from interpreting \"%s\" as a regridding mask, specify \"--msk_src=none\" and/or \"--msk_dst=none\", as appropriate. To utilize some other variable as the mask variable, specify \"--msk_src=msk_nm\" and/or \"--msk_dst=msk_nm\", as appropriate. Mask treatment is subtle, and NCO tries to \"do the right thing\". Whether it does is often easiest to discern by visual inspection of the regridded results in a turn-key viewer like Panoply or ncview.\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,nco_typ_sng(msk_typ),msk_nm_in,msk_nm_in);
} /* msk_typ */
switch(msk_typ){
case NC_FLOAT:
if(has_mss_val_msk){
const float mss_val_flt=mss_val_msk_dbl;
for(idx=0;idx<grd_sz_nbr;idx++)
if(msk_unn.fp[idx] == mss_val_flt || msk_unn.fp[idx] == 0.0f) msk[idx]=0;
}else{
for(idx=0;idx<grd_sz_nbr;idx++)
if(msk_unn.fp[idx] == 0.0f) msk[idx]=0;
} /* !mss_val */
break;
case NC_DOUBLE:
if(has_mss_val_msk){
const double mss_val_dbl=mss_val_msk_dbl;
for(idx=0;idx<grd_sz_nbr;idx++)
if(msk_unn.dp[idx] == mss_val_dbl || msk_unn.dp[idx] == 0.0) msk[idx]=0;
}else{
for(idx=0;idx<grd_sz_nbr;idx++)
if(msk_unn.dp[idx] == 0.0) msk[idx]=0;
} /* !mss_val */
break;
case NC_INT:
if(has_mss_val_msk){
const int mss_val_int=mss_val_msk_dbl;
for(idx=0;idx<grd_sz_nbr;idx++)
if(msk_unn.ip[idx] == mss_val_int || msk_unn.ip[idx] == 0) msk[idx]=0;
}else{
for(idx=0;idx<grd_sz_nbr;idx++)
if(msk_unn.ip[idx] == 0) msk[idx]=0;
} /* !mss_val */
break;
case NC_SHORT:
/* http://stackoverflow.com/questions/208433/how-do-i-write-a-short-literal-in-c */
if(has_mss_val_msk){
const short mss_val_sht=mss_val_msk_dbl;
for(idx=0;idx<grd_sz_nbr;idx++)
if(msk_unn.sp[idx] == mss_val_sht || msk_unn.sp[idx] == ((short)0)) msk[idx]=0;
}else{
for(idx=0;idx<grd_sz_nbr;idx++)
if(msk_unn.sp[idx] == ((short)0)) msk[idx]=0;
/* 20160111: AMSR kludge fxm */
// for(idx=0;idx<grd_sz_nbr;idx++) if(msk[idx] == 1) msk[idx]=0;
} /* !mss_val */
break;
case NC_BYTE:
if(has_mss_val_msk){
const nco_byte mss_val_byt=mss_val_msk_dbl;
for(idx=0;idx<grd_sz_nbr;idx++)
if(msk_unn.bp[idx] == mss_val_byt || msk_unn.bp[idx] == ((nco_byte)0)) msk[idx]=0;
}else{
for(idx=0;idx<grd_sz_nbr;idx++)
if(msk_unn.bp[idx] == ((nco_byte)0)) msk[idx]=0;
/* 20170811: GHRSST kludge? */
} /* !mss_val */
break;
default:
(void)fprintf(stderr,"%s: ERROR %s mask variable \"%s\" has unsupported type = %s\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,nco_typ_sng(msk_typ));
nco_dfl_case_generic_err();
return NCO_ERR;
break;
} /* !msk_typ */
if(msk_unn.vp) msk_unn.vp=(void *)nco_free(msk_unn.vp);
} /* !msk_id */
if(nco_dbg_lvl_get() >= nco_dbg_scl){
lat_wgt_ttl=0.0;
area_ttl=0.0;
if(flg_grd_2D){
(void)fprintf(stderr,"%s: INFO %s reports inferred rectangular latitude grid area diagnostics (lat_wgt_ttl and frc_lat_wgt should be valid):\n",nco_prg_nm_get(),fnc_nm);
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++)
lat_wgt_ttl+=lat_wgt[lat_idx];
}else{
(void)fprintf(stderr,"%s: INFO %s reports inferred unstructured or curvilinear latitude grid area diagnostics (ignore lat_wgt_ttl and frc_lat_wgt):\n",nco_prg_nm_get(),fnc_nm);
} /* !flg_grd_2D */
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++)
for(lon_idx=0;lon_idx<lon_nbr;lon_idx++)
area_ttl+=area[lat_idx*lon_nbr+lon_idx];
(void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_ttl,area_ttl/(4.0*M_PI));
assert(area_ttl > 0.0);
/* Protect following assertion since area might be in, e.g., km2 (ELM, RACMO) */
if(flg_area_sr) assert(area_ttl <= 4.0*M_PI);
const double eps_rlt_area=1.0e-12; /* [frc] Error tolerance for global area */
if(nco_grd_xtn == nco_grd_xtn_glb){
if(fabs(1.0-area_ttl/(4.0*M_PI)) > eps_rlt_area)
(void)fprintf(stdout,"%s: WARNING %s reports area for inferred global grid differs from true global area (4*pi sr) by greater than allowed fraction %g\n",nco_prg_nm_get(),fnc_nm,eps_rlt_area);
} /* !nco_grd_xtn_glb */
} /* !dbg */
/* Open grid file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id);
/* Define dimensions */
/* 20151230 ERWG appears to require presence of corner arrays in grid file even when they are not used (e.g., bilinear)
But ERWG will break when corner values are bad. Default is do not write bad corner values. Uncomment next line to write bad corner values. */
/* flg_wrt_crn=True; */
if(flg_wrt_crn) rcd=nco_def_dim(out_id,grd_crn_nm,grd_crn_nbr,&dmn_id_grd_crn);
rcd=nco_def_dim(out_id,grd_sz_nm,grd_sz_nbr,&dmn_id_grd_sz);
rcd=nco_def_dim(out_id,grd_rnk_nm,grd_rnk_nbr,&dmn_id_grd_rnk);
int shuffle; /* [flg] Turn-on shuffle filter */
int deflate; /* [flg] Turn-on deflate filter */
deflate=(int)True;
shuffle=NC_SHUFFLE;
/* Define variables */
(void)nco_def_var(out_id,dmn_sz_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_rnk,&dmn_sz_int_id); /* NB: Too small to deflate */
(void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&area_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_sz,&msk_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,grd_ctr_lat_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lat_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lat_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,grd_ctr_lon_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lon_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lon_id,shuffle,deflate,dfl_lvl);
if(flg_wrt_crn){
dmn_ids[0]=dmn_id_grd_sz;
dmn_ids[1]=dmn_id_grd_crn;
(void)nco_def_var(out_id,grd_crn_lat_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lat_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lat_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,grd_crn_lon_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lon_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lon_id,shuffle,deflate,dfl_lvl);
} /* !flg_wrt_crn */
/* Define attributes */
aed_sct aed_mtd;
char *att_nm;
if(strstr(rgr->grd_ttl,"None given")){
const char att_fmt[]="NCO inferred this grid from input file %s";
att_val=(char *)nco_malloc((strlen(att_fmt)+strlen(rgr->fl_in)+1L)*sizeof(char));
sprintf(att_val,att_fmt,rgr->fl_in);
}else{
att_val=strdup(rgr->grd_ttl);
} /* !grd_ttl */
rcd=nco_char_att_put(out_id,NULL,"title",att_val);
rcd=nco_char_att_put(out_id,NULL,"Conventions","SCRIP");
const char usr_cpp[]=TKN2SNG(USER); /* [sng] Hostname from C pre-processor */
rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp);
rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO");
(void)nco_hst_att_cat(out_id,rgr->cmd_ln);
(void)nco_vrs_att_cat(out_id);
rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ));
rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ));
rcd=nco_char_att_put(out_id,dmn_sz_nm,"long_name","Size(s) of horizontal dimensions (in Fortran storage order for historical reasons)");
if(flg_area_sr){
rcd=nco_char_att_put(out_id,area_nm,"long_name","Solid Angle Subtended on Source Grid");
rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle");
rcd=nco_char_att_put(out_id,area_nm,"units","steradian");
}else{ /* !flg_area_sr */
rcd=nco_char_att_put(out_id,area_nm,"long_name","Area on Source Grid");
// rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle");
rcd=nco_char_att_put(out_id,area_nm,"units",area_unt);
} /* !flg_area_sr */
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"long_name","Latitude of Grid Cell Centers");
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"standard_name","latitude");
if(ngl_unt){
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,unt_sng,ngl_unt);
}else{
/* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees");
} /* !ngl_unt */
rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"long_name","Longitude of Grid Cell Centers");
rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"standard_name","longitude");
if(ngl_unt){
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,unt_sng,ngl_unt);
}else{
/* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees");
} /* !ngl_unt */
if(flg_wrt_crn){
rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"long_name","Latitude of Grid Cell Vertices");
if(ngl_unt){
rcd=nco_char_att_put(out_id,grd_crn_lat_nm,unt_sng,ngl_unt);
}else{
/* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees");
} /* !ngl_unt */
rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"long_name","Longitude of Grid Cell Vertices");
if(ngl_unt){
rcd=nco_char_att_put(out_id,grd_crn_lon_nm,unt_sng,ngl_unt);
}else{
/* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees");
} /* !ngl_unt */
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"bounds",grd_crn_lat_nm);
rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"bounds",grd_crn_lon_nm);
} /* !flg_wrt_crn */
rcd=nco_char_att_put(out_id,msk_nm,"long_name","Binary Integer Mask for Grid");
rcd=nco_char_att_put(out_id,msk_nm,"units","none");
/* Begin data mode */
(void)nco_enddef(out_id);
/* Write variables */
dmn_srt[0]=0L;
dmn_cnt[0]=grd_rnk_nbr;
rcd=nco_put_vara(out_id,dmn_sz_int_id,dmn_srt,dmn_cnt,dmn_sz_int,(nc_type)NC_INT);
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_nbr;
rcd=nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_nbr;
rcd=nco_put_vara(out_id,msk_id,dmn_srt,dmn_cnt,msk,(nc_type)NC_INT);
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_nbr;
rcd=nco_put_vara(out_id,grd_ctr_lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_nbr;
rcd=nco_put_vara(out_id,grd_ctr_lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ);
if(flg_wrt_crn){
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=grd_sz_nbr;
dmn_cnt[1]=grd_crn_nbr;
rcd=nco_put_vara(out_id,grd_crn_lat_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=grd_sz_nbr;
dmn_cnt[1]=grd_crn_nbr;
rcd=nco_put_vara(out_id,grd_crn_lon_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ);
} /* !flg_wrt_crn */
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
fl_out=rgr->fl_ugrid;
if(fl_out){
/* Test UGRID:
Documentation: https://github.com/ugrid-conventions/ugrid-conventions
Procedure: Create 1x1 skeleton file, infer UGRID and SCRIP grids from it
ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr skl=${HOME}/skl_180x360.nc --rgr scrip=${HOME}/grd_180x360_SCRIP.nc --rgr latlon=180,360#lat_typ=eqa#lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc
ncks -O -D 1 --rgr infer --rgr ugrid=${HOME}/grd_ugrid.nc --rgr scrip=${HOME}/grd_scrip.nc ~/skl_180x360.nc ~/foo.nc
ncks --cdl -v mesh_node_y ~/grd_ugrid.nc
ncks --cdl -v mesh_face_nodes,mesh_face_x,mesh_face_y -d nFaces,0 ~/grd_ugrid.nc
ncks --cdl -v mesh_edge_nodes,mesh_edge_x,mesh_edge_y -d nEdges,0 ~/grd_ugrid.nc
ncks --cdl -v grid_center_lat,grid_corner_lat -d grid_size,0,,360 -d grid_corners,0,3 ~/grd_scrip.nc
ncks --cdl -m -M ~/grd_ugrid.nc */
char *dgx_nm=NULL_CEWI; /* [sng] Name of edge_coordinates x variable */
char *dgy_nm=NULL_CEWI; /* [sng] Name of edge_coordinates y variable */
char *dg_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as edges */
char *dg_nd_nm=NULL_CEWI; /* [sng] Name of edge_node_connectivity variable */
char *fcx_nm=NULL_CEWI; /* [sng] Name of face_coordinates x variable */
char *fcy_nm=NULL_CEWI; /* [sng] Name of face_coordinates y variable */
char *fc_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as faces */
char *fc_nd_nm=NULL_CEWI; /* [sng] Name of face_node_connectivity variable */
char *msh_nm=NULL_CEWI; /* [sng] Name of mesh topology variable */
char *nd_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes */
char *ndx_nm=NULL_CEWI; /* [sng] Name of node_coordinates x variable */
char *ndy_nm=NULL_CEWI; /* [sng] Name of node_coordinates y variable */
char *npe_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes-per-edge */
char *npf_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes-per-face */
double *dgx=NULL_CEWI; /* [dgr] Characteristic longitude of edges */
double *dgy=NULL_CEWI; /* [dgr] Characteristic latitude of edges */
double *fcx=NULL_CEWI; /* [dgr] Characteristic longitude of faces */
double *fcy=NULL_CEWI; /* [dgr] Characteristic latitude of faces */
double *ndx=NULL_CEWI; /* [dgr] Longitude of nodes */
double *ndy=NULL_CEWI; /* [dgr] Latitude of nodes */
int *dg_nd; /* [idx] edge_node_connectivity variable */
int *fc_nd; /* [idx] face_node_connectivity variable */
int dg_nd_id=NC_MIN_INT; /* [id] edge_node_connectivity variable ID */
int dgx_id=NC_MIN_INT; /* [id] Characteristic longitude of edges variable ID */
int dgy_id=NC_MIN_INT; /* [id] Characteristic latitude of edges variable ID */
int dmn_id_dg=NC_MIN_INT; /* [id] Dimension ID for edges */
int dmn_id_fc=NC_MIN_INT; /* [id] Dimension ID for faces */
int dmn_id_nd=NC_MIN_INT; /* [id] Dimension ID for nodes */
int dmn_id_npe=NC_MIN_INT; /* [id] Dimension ID for nodes-per-edge */
int dmn_id_npf=NC_MIN_INT; /* [id] Dimension ID for nodes-per-face */
int fc_nd_id=NC_MIN_INT; /* [id] face_node_connectivity variable ID */
int fcx_id=NC_MIN_INT; /* [id] Characteristic longitude of faces variable ID */
int fcy_id=NC_MIN_INT; /* [id] Characteristic latitude of faces variable ID */
int msh_id=NC_MIN_INT; /* [id] Mesh topology variable ID */
int msh_val=42; /* [id] Mesh topology variable value from Monty Python */
int ndx_id=NC_MIN_INT; /* [id] Longitude of mesh nodes variable ID */
int ndy_id=NC_MIN_INT; /* [id] Latitude of mesh nodes variable ID */
const long fc_nbr=grd_sz_nbr; /* [nbr] Number of faces in mesh */
const long npe_nbr=2; /* [nbr] Number of nodes per edge */
const long npf_nbr=grd_crn_nbr; /* [nbr] Number of nodes per face */
long dg_idx; /* [idx] Counting index for edges */
long dg_nbr=(long)NC_MIN_INT64; /* [nbr] Number of edges in mesh */
long fc_idx; /* [idx] Counting index for faces */
long nd_idx; /* [idx] Counting index for nodes */
long nd_nbr=(long)NC_MIN_INT64; /* [nbr] Number of nodes in mesh */
long srt_idx=0; /* [idx] start_index (C/Fortran) for edge_nodes, face_nodes */
if(!dgx_nm) dgx_nm=(char *)strdup("mesh_edge_x");
if(!dgy_nm) dgy_nm=(char *)strdup("mesh_edge_y");
if(!dg_dmn_nm) dg_dmn_nm=(char *)strdup("nEdges");
if(!fcx_nm) fcx_nm=(char *)strdup("mesh_face_x");
if(!fcy_nm) fcy_nm=(char *)strdup("mesh_face_y");
if(!fc_dmn_nm) fc_dmn_nm=(char *)strdup("nFaces");
if(!dg_nd_nm) dg_nd_nm=(char *)strdup("mesh_edge_nodes");
if(!fc_nd_nm) fc_nd_nm=(char *)strdup("mesh_face_nodes");
if(!msh_nm) msh_nm=(char *)strdup("mesh");
if(!nd_dmn_nm) nd_dmn_nm=(char *)strdup("nNodes");
if(!ndx_nm) ndx_nm=(char *)strdup("mesh_node_x");
if(!ndy_nm) ndy_nm=(char *)strdup("mesh_node_y");
if(!npe_dmn_nm) npe_dmn_nm=(char *)strdup("two");
if(!npf_dmn_nm) npf_dmn_nm=(char *)strdup("maxNodesPerFace");
if(flg_grd_1D){
(void)fprintf(stdout,"%s: ERROR %s UGRID output does not yet support 1D grids\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
}else if(flg_grd_2D){
/* Assume 2D grids are global and comprised of quadrilaterals */
switch(lat_typ){
case nco_grd_lat_fv:
/* Currently all 2D grids are converted to the same UGRID representation
fxm: Cap grids (e.g., FV) should eventually be written with a real cap,
rather than as the "polar teeth" representation currently used.
Polar teeth convention allows cap grid to be represented as rectangular on disk
However, cap grids are better suited to non-rectangular UGRID meshes */
case nco_grd_lat_eqa:
case nco_grd_lat_gss:
/* Numbers of unique edges and nodes counted from South Pole (SP) to North Pole (NP) */
dg_nbr=lon_nbr*2+ /* SP: cells_per_lat*unique_edges_per_cell */
(lat_nbr-2)*lon_nbr*2+ /* Mid: lats*cells_per_lat*unique_edges_per_cell */
lon_nbr*1; /* NP: cells_per_lat*unique_edges_per_cell */
nd_nbr=1+lon_nbr*1+ /* SP: SP+cells_per_lat*unique_nodes_per_cell */
(lat_nbr-2)*lon_nbr*1+ /* Mid: lats*cells_per_lat*unique_nodes_per_cell */
1; /* NP: NP */
break;
case nco_grd_lat_unk:
case nco_grd_lat_nil:
default:
nco_dfl_case_generic_err(); break;
} /* !lat_typ */
}else if(flg_grd_crv){
(void)fprintf(stdout,"%s: ERROR %s UGRID output does not yet support curvilinear grids\n",nco_prg_nm_get(),fnc_nm);
nco_exit(EXIT_FAILURE);
} /* !flg_grd */
dg_nd=(int *)nco_malloc(dg_nbr*npe_nbr*nco_typ_lng(NC_INT));
dgx=(double *)nco_malloc(dg_nbr*nco_typ_lng(crd_typ));
dgy=(double *)nco_malloc(dg_nbr*nco_typ_lng(crd_typ));
fc_nd=(int *)nco_malloc(fc_nbr*npf_nbr*nco_typ_lng(NC_INT));
fcx=(double *)nco_malloc(fc_nbr*nco_typ_lng(crd_typ));
fcy=(double *)nco_malloc(fc_nbr*nco_typ_lng(crd_typ));
ndx=(double *)nco_malloc(nd_nbr*nco_typ_lng(crd_typ));
ndy=(double *)nco_malloc(nd_nbr*nco_typ_lng(crd_typ));
const long int idx_fst_crn_ll=0;
const long int idx_fst_crn_lr=1;
const long int idx_fst_crn_ur=2;
const long int idx_fst_crn_ul=3;
/* Node Ordering:
Each interior face requires one new node
Node 0 at SP
New latitude row moves next node North
Add nodes to run West->East */
/* SP */
ndx[0]=lon_crn[0]; /* Longitude degenerate at SP, NP, keep same longitude as corner array */
ndy[0]=lat_crn[0];
/* Mid */
for(nd_idx=1;nd_idx<nd_nbr-1L;nd_idx++){
fc_idx=nd_idx-1L;
lat_idx=fc_idx/lon_nbr;
lon_idx=fc_idx%lon_nbr;
ndx[nd_idx]=lon_crn[lon_idx*grd_crn_nbr+idx_fst_crn_ul];
ndy[nd_idx]=lat_crn[lat_idx*grd_crn_nbr+idx_fst_crn_ul];
} /* !nd_idx */
/* NP */
ndx[nd_nbr-1L]=lon_crn[(lon_nbr-1)*grd_crn_nbr+idx_fst_crn_ul];
ndy[nd_nbr-1L]=lat_crn[(lat_nbr-1)*grd_crn_nbr+idx_fst_crn_ul];
/* Edge Ordering:
epf_nbr is number of distinct edges-per-face (incremental, for interior cells)
Each additional interior rectangular gridcell requires two new edges:
Edge 0 runs South->North for all cells
Edge 1 runs West->East for all cells
NP row requires only one new edge per face */
/* SP */
const int epf_nbr=2; /* [nbr] Number of distinct edges-per-face (incremental, for interior cells) */
for(fc_idx=0;fc_idx<lon_nbr;fc_idx++){
dg_idx=fc_idx*epf_nbr;
/* Edge 0 */
dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx;
dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+fc_idx+1L;
/* Edge 1 */
dg_nd[(dg_idx+1L)*npe_nbr+0L]=srt_idx+fc_idx+1L;
dg_nd[(dg_idx+1L)*npe_nbr+1L]=srt_idx+fc_idx+2L;
} /* !fc_idx */
/* Mid */
for(fc_idx=lon_nbr;fc_idx<(lat_nbr-1L)*lon_nbr;fc_idx++){
dg_idx=fc_idx*epf_nbr;
/* Edge 0 */
dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx+fc_idx-lon_nbr+1L;
dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+fc_idx+1L;
/* Edge 1 */
dg_nd[(dg_idx+1L)*npe_nbr+0L]=srt_idx+fc_idx+1L;
dg_nd[(dg_idx+1L)*npe_nbr+1L]=srt_idx+fc_idx+2L;
} /* !fc_idx */
/* NP */
for(fc_idx=fc_nbr-lon_nbr;fc_idx<fc_nbr;fc_idx++){
/* Only one new edge per face in last row, easiest to count backwards from last edge */
dg_idx=dg_nbr-(fc_nbr-fc_idx);
/* NP faces require only only one new edge, Edge 0 */
dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx+fc_idx-lon_nbr+1L;
dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+nd_nbr-1L;
} /* !fc_idx */
/* SP */
for(fc_idx=0;fc_idx<lon_nbr;fc_idx++){
fc_nd[fc_idx*npf_nbr+0L]=srt_idx+0L;
fc_nd[fc_idx*npf_nbr+1L]=srt_idx+fc_idx+2L; /* NB: CCW */
fc_nd[fc_idx*npf_nbr+2L]=srt_idx+fc_idx+1L; /* NB: CCW */
fc_nd[fc_idx*npf_nbr+3L]=mss_val_int_out;
} /* !fc_idx */
/* Mid */
for(fc_idx=lon_nbr;fc_idx<fc_nbr-lon_nbr;fc_idx++){
fc_nd[fc_idx*npf_nbr+idx_fst_crn_ll]=srt_idx+fc_idx-lon_nbr+1L;
fc_nd[fc_idx*npf_nbr+idx_fst_crn_lr]=srt_idx+fc_idx-lon_nbr+2L;
fc_nd[fc_idx*npf_nbr+idx_fst_crn_ur]=srt_idx+fc_idx+2L;
fc_nd[fc_idx*npf_nbr+idx_fst_crn_ul]=srt_idx+fc_idx+1L;
} /* !fc_idx */
/* NP */
for(fc_idx=fc_nbr-lon_nbr;fc_idx<fc_nbr;fc_idx++){
fc_nd[fc_idx*npf_nbr+0L]=srt_idx+nd_nbr-(fc_nbr-fc_idx)-2L;
fc_nd[fc_idx*npf_nbr+1L]=srt_idx+nd_nbr-(fc_nbr-fc_idx)-1L;
fc_nd[fc_idx*npf_nbr+2L]=srt_idx+nd_nbr-1L;
fc_nd[fc_idx*npf_nbr+3L]=mss_val_int_out;
} /* !fc_idx */
/* Characteristic coordinates */
for(dg_idx=0;dg_idx<dg_nbr-1L;dg_idx++){
idx=dg_idx*npe_nbr;
dgx[dg_idx]=0.5*(ndx[dg_nd[idx+0L]]+ndx[dg_nd[idx+1L]]);
dgy[dg_idx]=0.5*(ndy[dg_nd[idx+0L]]+ndy[dg_nd[idx+1L]]);
} /* !dg_idx */
/* Degenerate longitude at SP, NP, causes weird characterisic longitude unless special care taken */
for(fc_idx=0;fc_idx<fc_nbr-1L;fc_idx++){
idx=fc_idx*npf_nbr;
if(fc_idx < lon_nbr){
fcx[fc_idx]=0.5*(ndx[fc_nd[idx+1L]]+ndx[fc_nd[idx+2L]]);
}else if(fc_idx >= fc_nbr-lon_nbr-1){
fcx[fc_idx]=0.5*(ndx[fc_nd[idx+0L]]+ndx[fc_nd[idx+1L]]);
}else if(fc_nd[idx+3L] != mss_val_int_out){
/* fxm for fcx use nco_lon_crn_avg_brnch() and 3-node version too */
fcx[fc_idx]=0.25*(ndx[fc_nd[idx+0L]]+ndx[fc_nd[idx+1L]]+ndx[fc_nd[idx+2L]]+ndx[fc_nd[idx+3L]]);
}else{
abort();
} /* !fc_idx */
if(fc_nd[idx+3L] != mss_val_int_out) fcy[fc_idx]=0.25*(ndy[fc_nd[idx+0L]]+ndy[fc_nd[idx+1L]]+ndy[fc_nd[idx+2L]]+ndy[fc_nd[idx+3L]]); else fcy[fc_idx]=0.33*(ndy[fc_nd[idx+0L]]+ndy[fc_nd[idx+1L]]+ndy[fc_nd[idx+2L]]);
} /* !fc_idx */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id);
rcd=nco_def_dim(out_id,dg_dmn_nm,dg_nbr,&dmn_id_dg);
rcd=nco_def_dim(out_id,fc_dmn_nm,fc_nbr,&dmn_id_fc);
rcd=nco_def_dim(out_id,nd_dmn_nm,nd_nbr,&dmn_id_nd);
rcd=nco_def_dim(out_id,npe_dmn_nm,npe_nbr,&dmn_id_npe);
rcd=nco_def_dim(out_id,npf_dmn_nm,npf_nbr,&dmn_id_npf);
dmn_ids[0]=dmn_id_dg;
dmn_ids[1]=dmn_id_npe;
rcd=nco_def_var(out_id,dg_nd_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids,&dg_nd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dg_nd_id,shuffle,deflate,dfl_lvl);
dmn_ids[0]=dmn_id_fc;
dmn_ids[1]=dmn_id_npf;
rcd=nco_def_var(out_id,fc_nd_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids,&fc_nd_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fc_nd_id,shuffle,deflate,dfl_lvl);
rcd=nco_def_var(out_id,msh_nm,(nc_type)NC_INT,dmn_nbr_0D,(int *)NULL,&msh_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msh_id,shuffle,deflate,dfl_lvl);
rcd=nco_def_var(out_id,ndx_nm,crd_typ,dmn_nbr_1D,&dmn_id_nd,&ndx_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ndx_id,shuffle,deflate,dfl_lvl);
rcd=nco_def_var(out_id,ndy_nm,crd_typ,dmn_nbr_1D,&dmn_id_nd,&ndy_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ndy_id,shuffle,deflate,dfl_lvl);
rcd=nco_def_var(out_id,dgx_nm,crd_typ,dmn_nbr_1D,&dmn_id_dg,&dgx_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dgx_id,shuffle,deflate,dfl_lvl);
rcd=nco_def_var(out_id,dgy_nm,crd_typ,dmn_nbr_1D,&dmn_id_dg,&dgy_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dgy_id,shuffle,deflate,dfl_lvl);
rcd=nco_def_var(out_id,fcx_nm,crd_typ,dmn_nbr_1D,&dmn_id_fc,&fcx_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fcx_id,shuffle,deflate,dfl_lvl);
rcd=nco_def_var(out_id,fcy_nm,crd_typ,dmn_nbr_1D,&dmn_id_fc,&fcy_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fcy_id,shuffle,deflate,dfl_lvl);
if(strstr(rgr->grd_ttl,"None given")){
const char att_fmt[]="NCO constructed this UGRID grid from scratch";
att_val=(char *)nco_malloc((strlen(att_fmt)+strlen(rgr->fl_in)+1L)*sizeof(char));
sprintf(att_val,att_fmt);
}else{
att_val=strdup(rgr->grd_ttl);
} /* !grd_ttl */
rcd=nco_char_att_put(out_id,NULL,"title",att_val);
rcd=nco_char_att_put(out_id,NULL,"Conventions","CF-1.6, UGRID-1.0");
rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp);
rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO");
(void)nco_hst_att_cat(out_id,rgr->cmd_ln);
(void)nco_vrs_att_cat(out_id);
rcd=nco_char_att_put(out_id,msh_nm,"cf_role","mesh_topology");
rcd=nco_char_att_put(out_id,msh_nm,"standard_name","mesh_topology");
rcd=nco_char_att_put(out_id,msh_nm,"long_name","Topology data");
att_nm=strdup("topology_dimension");
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=msh_nm;
aed_mtd.id=msh_id;
aed_mtd.sz=1;
aed_mtd.type=NC_INT;
aed_mtd.val.ip=&val_two;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,msh_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
aed_mtd.sz=strlen(ndx_nm)+strlen(ndy_nm)+1L;
att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR));
(void)sprintf(att_val,"%s %s",ndx_nm,ndy_nm);
rcd=nco_char_att_put(out_id,msh_nm,"node_coordinates",att_val);
rcd=nco_char_att_put(out_id,msh_nm,"face_node_connectivity",fc_nd_nm);
aed_mtd.sz=strlen(fcx_nm)+strlen(fcy_nm)+1L;
att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR));
(void)sprintf(att_val,"%s %s",fcx_nm,fcy_nm);
rcd=nco_char_att_put(out_id,msh_nm,"face_coordinates",att_val);
rcd=nco_char_att_put(out_id,msh_nm,"face_dimension",fc_dmn_nm);
rcd=nco_char_att_put(out_id,msh_nm,"edge_node_connectivity",dg_nd_nm);
aed_mtd.sz=strlen(dgx_nm)+strlen(dgy_nm)+1L;
att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR));
(void)sprintf(att_val,"%s %s",dgx_nm,dgy_nm);
rcd=nco_char_att_put(out_id,msh_nm,"edge_coordinates",att_val);
rcd=nco_char_att_put(out_id,msh_nm,"edge_dimension",dg_dmn_nm);
rcd=nco_char_att_put(out_id,ndx_nm,"standard_name","longitude");
rcd=nco_char_att_put(out_id,ndx_nm,"long_name","Longitude of mesh nodes");
rcd=nco_char_att_put(out_id,ndx_nm,"units","degrees_east");
rcd=nco_char_att_put(out_id,ndy_nm,"standard_name","latitude");
rcd=nco_char_att_put(out_id,ndy_nm,"long_name","Latitude of mesh nodes");
rcd=nco_char_att_put(out_id,ndy_nm,"units","degrees_north");
rcd=nco_char_att_put(out_id,dg_nd_nm,"cf_role","edge_node_connectivity");
rcd=nco_char_att_put(out_id,dg_nd_nm,"long_name","Maps every edge to the two nodes that it connects");
att_nm=strdup("start_index");
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=dg_nd_nm;
aed_mtd.id=dg_nd_id;
aed_mtd.sz=1;
aed_mtd.type=NC_INT;
aed_mtd.val.ip=&val_zero;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,dg_nd_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
rcd=nco_char_att_put(out_id,fc_nd_nm,"cf_role","face_node_connectivity");
rcd=nco_char_att_put(out_id,fc_nd_nm,"long_name","Maps every face to its corner nodes");
att_nm=strdup("start_index");
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=fc_nd_nm;
aed_mtd.id=fc_nd_id;
aed_mtd.sz=1;
aed_mtd.type=NC_INT;
aed_mtd.val.ip=&val_zero;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,fc_nd_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
att_nm=strdup("_FillValue");
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=fc_nd_nm;
aed_mtd.id=fc_nd_id;
aed_mtd.sz=1;
aed_mtd.type=NC_INT;
aed_mtd.val.ip=&mss_val_int_out;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,fc_nd_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
rcd=nco_char_att_put(out_id,dgx_nm,"standard_name","longitude");
rcd=nco_char_att_put(out_id,dgx_nm,"long_name","Characteristic longitude of 2D mesh face");
rcd=nco_char_att_put(out_id,dgx_nm,"units","degrees_east");
rcd=nco_char_att_put(out_id,dgy_nm,"standard_name","latitude");
rcd=nco_char_att_put(out_id,dgy_nm,"long_name","Characteristic latitude of 2D mesh face");
rcd=nco_char_att_put(out_id,dgy_nm,"units","degrees_north");
rcd=nco_char_att_put(out_id,fcx_nm,"standard_name","longitude");
rcd=nco_char_att_put(out_id,fcx_nm,"long_name","Characteristic longitude of 2D mesh edge");
rcd=nco_char_att_put(out_id,fcx_nm,"units","degrees_east");
rcd=nco_char_att_put(out_id,fcy_nm,"standard_name","latitude");
rcd=nco_char_att_put(out_id,fcy_nm,"long_name","Characteristic latitude of 2D mesh edge");
rcd=nco_char_att_put(out_id,fcy_nm,"units","degrees_north");
/* Begin data mode */
(void)nco_enddef(out_id);
(void)nco_put_vara(out_id,msh_id,dmn_srt,dmn_cnt,&msh_val,(nc_type)NC_INT);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=dg_nbr;
dmn_cnt[1]=epf_nbr;
(void)nco_put_vara(out_id,dg_nd_id,dmn_srt,dmn_cnt,dg_nd,(nc_type)NC_INT);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=fc_nbr;
dmn_cnt[1]=npf_nbr;
(void)nco_put_vara(out_id,fc_nd_id,dmn_srt,dmn_cnt,fc_nd,(nc_type)NC_INT);
dmn_srt[0]=0L;
dmn_cnt[0]=nd_nbr;
(void)nco_put_vara(out_id,ndx_id,dmn_srt,dmn_cnt,ndx,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=nd_nbr;
(void)nco_put_vara(out_id,ndy_id,dmn_srt,dmn_cnt,ndy,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=dg_nbr;
(void)nco_put_vara(out_id,dgx_id,dmn_srt,dmn_cnt,dgx,crd_typ);
(void)nco_put_vara(out_id,dgy_id,dmn_srt,dmn_cnt,dgy,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=fc_nbr;
(void)nco_put_vara(out_id,fcx_id,dmn_srt,dmn_cnt,fcx,crd_typ);
(void)nco_put_vara(out_id,fcy_id,dmn_srt,dmn_cnt,fcy,crd_typ);
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
/* Free memory associated with output file */
if(dgx) dgx=(double *)nco_free(dgx);
if(dgy) dgy=(double *)nco_free(dgy);
if(dg_nd) dg_nd=(int *)nco_free(dg_nd);
if(fcx) fcx=(double *)nco_free(fcx);
if(fcy) fcy=(double *)nco_free(fcy);
if(fc_nd) fc_nd=(int *)nco_free(fc_nd);
if(ndx) ndx=(double *)nco_free(ndx);
if(ndy) ndy=(double *)nco_free(ndy);
/* Free strings */
if(dgx_nm) dgx_nm=(char *)nco_free(dgx_nm);
if(dgy_nm) dgy_nm=(char *)nco_free(dgy_nm);
if(dg_dmn_nm) dg_dmn_nm=(char *)nco_free(dg_dmn_nm);
if(dg_nd_nm) dg_nd_nm=(char *)nco_free(dg_nd_nm);
if(fcx_nm) fcx_nm=(char *)nco_free(fcx_nm);
if(fcy_nm) fcy_nm=(char *)nco_free(fcy_nm);
if(fc_dmn_nm) fc_dmn_nm=(char *)nco_free(fc_dmn_nm);
if(fc_nd_nm) fc_nd_nm=(char *)nco_free(fc_nd_nm);
if(msh_nm) msh_nm=(char *)nco_free(msh_nm);
if(nd_dmn_nm) nd_dmn_nm=(char *)nco_free(nd_dmn_nm);
if(ndx_nm) ndx_nm=(char *)nco_free(ndx_nm);
if(ndy_nm) ndy_nm=(char *)nco_free(ndy_nm);
if(npe_dmn_nm) npe_dmn_nm=(char *)nco_free(npe_dmn_nm);
if(npf_dmn_nm) npf_dmn_nm=(char *)nco_free(npf_dmn_nm);
} /* !fl_ugrid */
/* Free memory associated with input file */
if(dmn_sz_int) dmn_sz_int=(int *)nco_free(dmn_sz_int);
if(msk) msk=(int *)nco_free(msk);
if(area) area=(double *)nco_free(area);
if(grd_ctr_lat) grd_ctr_lat=(double *)nco_free(grd_ctr_lat);
if(grd_ctr_lon) grd_ctr_lon=(double *)nco_free(grd_ctr_lon);
if(grd_crn_lat) grd_crn_lat=(double *)nco_free(grd_crn_lat);
if(grd_crn_lon) grd_crn_lon=(double *)nco_free(grd_crn_lon);
if(lat_bnd) lat_bnd=(double *)nco_free(lat_bnd);
if(lat_crn) lat_crn=(double *)nco_free(lat_crn);
if(lat_ctr) lat_ctr=(double *)nco_free(lat_ctr);
if(lat_ntf) lat_ntf=(double *)nco_free(lat_ntf);
if(lat_wgt) lat_wgt=(double *)nco_free(lat_wgt);
if(lon_bnd) lon_bnd=(double *)nco_free(lon_bnd);
if(lon_crn) lon_crn=(double *)nco_free(lon_crn);
if(lon_ctr) lon_ctr=(double *)nco_free(lon_ctr);
if(lon_ntf) lon_ntf=(double *)nco_free(lon_ntf);
if(vrt_cll) vrt_cll=(int *)nco_free(vrt_cll);
if(vrt_lat) vrt_lat=(double *)nco_free(vrt_lat);
if(vrt_lon) vrt_lon=(double *)nco_free(vrt_lon);
/* Free strings */
if(area_nm_in) area_nm_in=(char *)nco_free(area_nm_in);
if(area_unt) area_unt=(char *)nco_free(area_unt);
if(bnd_dmn_nm) bnd_dmn_nm=(char *)nco_free(bnd_dmn_nm);
if(col_dmn_nm) col_dmn_nm=(char *)nco_free(col_dmn_nm);
if(lat_bnd_nm) lat_bnd_nm=(char *)nco_free(lat_bnd_nm);
if(lat_dmn_nm) lat_dmn_nm=(char *)nco_free(lat_dmn_nm);
if(lat_nm_in) lat_nm_in=(char *)nco_free(lat_nm_in);
if(lon_bnd_nm) lon_bnd_nm=(char *)nco_free(lon_bnd_nm);
if(lon_dmn_nm) lon_dmn_nm=(char *)nco_free(lon_dmn_nm);
if(lon_nm_in) lon_nm_in=(char *)nco_free(lon_nm_in);
if(msk_nm_in) msk_nm_in=(char *)nco_free(msk_nm_in);
if(ngl_unt) ngl_unt=(char *)nco_free(ngl_unt);
if(vrt_cll_nm) vrt_cll_nm=(char *)nco_free(vrt_cll_nm);
if(vrt_lat_nm) vrt_lat_nm=(char *)nco_free(vrt_lat_nm);
if(vrt_lon_nm) vrt_lon_nm=(char *)nco_free(vrt_lon_nm);
return rcd;
} /* !nco_grd_nfr() */
double /* O [dgr] Longitude difference (lon_r-lon_l) */
nco_lon_dff_brnch_dgr /* [fnc] Subtract longitudes with branch-cut rules */
(double lon_r, /* I [dgr] Longitude on right of gridcell (subtractor) */
 double lon_l) /* I [dgr] Longitude on left of gridcell (subtractee) */
{
  /* Purpose: Return difference of two longitudes in degrees
     Assume longitudes are within 180 degrees of eachother
     Default orientation is monotonically increasing longitude from left to right
     Difference is wrapped by +/- 360 degrees when operands straddle a branch cut */
  const char fnc_nm[]="nco_lon_dff_brnch_dgr()";
  const double lon_dff=lon_r-lon_l; /* [dgr] Longitude difference (lon_r-lon_l) */
  if(lon_dff >= 180.0){
    (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff);
    return lon_dff-360.0;
  }else if(lon_dff <= -180.0){
    /* 20xx: Warn on negative wrap too, for symmetry with positive branch above
       and consistency with diagnostic behavior of nco_lon_dff_brnch_rdn() */
    (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff);
    return lon_dff+360.0;
  } /* !lon_dff */
  return lon_dff;
} /* !nco_lon_dff_brnch_dgr() */
double /* O [rdn] Longitude difference (lon_r-lon_l) */
nco_lon_dff_brnch_rdn /* [fnc] Subtract longitudes with branch-cut rules */
(double lon_r, /* I [rdn] Longitude on right of gridcell (subtractor) */
 double lon_l) /* I [rdn] Longitude on left of gridcell (subtractee) */
{
  /* Purpose: Return difference of two longitudes in radians
     Assume longitudes are within pi radians of eachother
     Default orientation is monotonically increasing longitude from left to right
     Result is wrapped by +/- 2*pi when operands straddle a branch cut */
  const char fnc_nm[]="nco_lon_dff_brnch_rdn()";
  const double lon_dff=lon_r-lon_l; /* [rdn] Longitude difference (lon_r-lon_l) */
  const int wrp_pst=(lon_dff >= M_PI); /* [flg] Difference wraps past +pi */
  const int wrp_ngt=(lon_dff <= -M_PI); /* [flg] Difference wraps past -pi */
  /* Longitudes on different branch cuts are expected when computing polygon area,
     so warn only if requested with high debugging level */
  if(wrp_pst || wrp_ngt)
    if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff);
  if(wrp_pst) return lon_dff-M_PI-M_PI;
  if(wrp_ngt) return lon_dff+M_PI+M_PI;
  return lon_dff;
} /* !nco_lon_dff_brnch_rdn() */
/* Fold one corner longitude onto the branch of the reference (lower-left)
   longitude: shift by -/+360 degrees when the raw difference reaches half a
   revolution. lon_nm names the corner in the INFO diagnostic, which (matching
   the original inline code) is printed only for the positive crossing and only
   at high debugging levels. */
static double /* O [dgr] Corner longitude shifted onto branch of lon_ll */
nco_lon_crn_brnch_adj /* [fnc] Adjust corner longitude to branch of reference longitude */
(const char *fnc_nm, /* I [sng] Name of calling function for diagnostics */
const char *lon_nm, /* I [sng] Name of corner longitude for diagnostics */
double lon, /* I [dgr] Corner longitude to adjust */
double lon_ll) /* I [dgr] Reference (lower-left) longitude */
{
  const double lon_dff=lon-lon_ll; /* [dgr] Longitude difference */
  if(lon_dff >= 180.0){
    if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports %s, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_nm,lon,lon_ll,lon_dff);
    return lon-360.0;
  }else if(lon_dff <= -180.0){
    return lon+360.0;
  } /* !lon_dff */
  return lon;
} /* !nco_lon_crn_brnch_adj() */
double /* O [dgr] Longitude average */
nco_lon_crn_avg_brnch /* [fnc] Average quadrilateral longitude with branch-cut rules */
(double lon_ll, /* I [dgr] Longitude at lower left of gridcell */
double lon_lr, /* I [dgr] Longitude at lower right of gridcell */
double lon_ur, /* I [dgr] Longitude at upper right of gridcell */
double lon_ul) /* I [dgr] Longitude at upper left of gridcell */
{
  /* Purpose: Return average of four corner longitudes of quadrilateral
     Assume longitudes are within 180 degrees of eachother
     Default orientation is monotonically increasing longitude from left to right
     WLOG, adjust all longitudes to be on same branch as lon_ll
     The identical adjustment was previously triplicated inline; it is now
     factored into nco_lon_crn_brnch_adj() with byte-identical diagnostics */
  const char fnc_nm[]="nco_lon_crn_avg_brnch()";
  lon_lr=nco_lon_crn_brnch_adj(fnc_nm,"lon_lr",lon_lr,lon_ll);
  lon_ur=nco_lon_crn_brnch_adj(fnc_nm,"lon_ur",lon_ur,lon_ll);
  lon_ul=nco_lon_crn_brnch_adj(fnc_nm,"lon_ul",lon_ul,lon_ll);
  return 0.25*(lon_ll+lon_lr+lon_ur+lon_ul);
} /* !nco_lon_crn_avg_brnch() */
double /* O [dgr] Longitude average */
nco_lon_ply_avg_brnch_dgr /* [fnc] Average polygon longitude with branch-cut rules */
(double *lon_crn, /* I [dgr] Longitude of gridcell corners */
long lon_nbr) /* I [nbr] Number of vertices in polygon */
{
  /* Purpose: Centroid (average) longitude of polygon vertices
     Each vertex is compared to vertex zero and the running total is shifted
     by one full revolution whenever the vertex lies across a branch cut, so
     every contribution is effectively accumulated on the branch of lon_crn[0]
     Assumes all longitudes lie within 180 degrees of one another */
  // const char fnc_nm[]="nco_lon_ply_avg_brnch()";
  double ttl; /* [dgr] Running total of branch-corrected longitudes */
  long idx; /* [idx] Polygon vertex index */
  assert(lon_nbr != 0);
  ttl=lon_crn[0];
  for(idx=1;idx<lon_nbr;idx++){
    const double dlt=lon_crn[idx]-lon_crn[0]; /* [dgr] Offset from reference vertex */
    ttl+=lon_crn[idx];
    /* Vertex is across a branch cut: pull its contribution back by one revolution */
    if(dlt >= 180.0) ttl-=360.0; else if(dlt <= -180.0) ttl+=360.0;
  } /* !idx */
  return ttl/lon_nbr;
} /* !nco_lon_ply_avg_brnch() */
nco_bool /* O [flg] Input corners were CCW */
nco_ccw_chk /* [fnc] Convert quadrilateral gridcell corners to CCW orientation */
(double * const crn_lat, /* [dgr] Latitude corners of gridcell */
double * const crn_lon, /* [dgr] Longitude corners of gridcell */
const int crn_nbr, /* [nbr] Number of corners per gridcell */
int idx_ccw, /* [idx] Index of starting vertice for CCW check (Point A = tail side AB) */
const int rcr_lvl) /* [nbr] Recursion level */
{
/* Purpose: Determine whether corner vertices are oriented CCW
If not, alter order so they are returned in CCW order
Function can call itself, and rcr_lvl indicates recursion level:
rcr_lvl=1: Called by host code, i.e., nco_grd_nfr()
rcr_lvl=2: Called by itself, i.e., nco_ccw_chk()
Assumptions:
Quadrilateral vertices are already corrected to obey branch-cut rules, i.e.,
all vertices are on "same side" of dateline or Greenwich as appropriate
Algorithm:
Start crn_idx=0, i.e., quadrilateral LL corner
Vector A runs from crn_idx=0 to crn_idx=1, i.e., quadrilateral LL->LR
Vector B runs from crn_idx=1 to crn_idx=2, i.e., quadrilateral LR->UR
Compute cross-product C = A x B
C is normal to plane containing A and B
Dot-product of C with radial vector to head A = tail B is positive if A and B are CCW
if(ABC is CCW){
if(CDA is CCW)
Done
else
Copy D:=A (make CDA degenerate, triangularize quadrilateral)
endif
}else(ABC is not CCW){
Assume entire quadrilateral is CW
Take mirror image of quadrilateral by switching B with D
If(new ABC is CCW){
If(CDA is CCW)
Done
else
Copy D:=A (make CDA degenerate, triangularize quadrilateral)
endif
}else{
Fail (return False, meaning point should be masked)
}
All cases return True (i.e., CCW) from rcr_lvl=1 except last
Last case returns False, and calling code should mask such an aberrant point */
const char fnc_nm[]="nco_ccw_chk()";
/* MSVC compiler chokes unless array size is compile-time constant */
const int CRN_NBR_MSVC=4;
double sin_lat[CRN_NBR_MSVC];
double sin_lon[CRN_NBR_MSVC];
double cos_lat[CRN_NBR_MSVC];
double cos_lon[CRN_NBR_MSVC];
double A_tail_x,A_tail_y,A_tail_z;
double A_head_x,A_head_y,A_head_z;
double A_x,A_y,A_z;
double B_tail_x,B_tail_y,B_tail_z;
double B_head_x,B_head_y,B_head_z;
double B_x,B_y,B_z;
double C_x,C_y,C_z;
double R_x,R_y,R_z;
double lat_rdn;
double lon_rdn;
double dot_prd;
int crn_idx; /* [idx] Corner idx */
int A_tail_idx,A_head_idx;
int B_tail_idx,B_head_idx;
nco_bool flg_ccw; /* [flg] Input is CCW */
assert(crn_nbr == CRN_NBR_MSVC);
/* Pre-compute trigonometric factors of each corner, used below to convert
   (lat,lon) in degrees to Cartesian coordinates on the unit sphere */
for(crn_idx=0;crn_idx<crn_nbr;crn_idx++){
lat_rdn=crn_lat[crn_idx]*M_PI/180.0;
lon_rdn=crn_lon[crn_idx]*M_PI/180.0;
sin_lat[crn_idx]=sin(lat_rdn);
cos_lat[crn_idx]=cos(lat_rdn);
sin_lon[crn_idx]=sin(lon_rdn);
cos_lon[crn_idx]=cos(lon_rdn);
} /* !crn_idx */
/* Calls from host code (i.e., nco_grd_nfr()) start at lower-left of quadrilateral ABCD = Point A = vertex 0
Calls from self can start from quadrilateral Point A or C
To check triangle CDA, start at upper-right of quadrilateral ABCD = Point C = vertex 2 */
A_tail_idx=idx_ccw;
A_head_idx=B_tail_idx=(A_tail_idx+1)%crn_nbr;
B_head_idx=(B_tail_idx+1)%crn_nbr;
/* Cartesian positions of the three vertices; R is the radial vector at the
   shared vertex (head of A = tail of B) */
A_tail_x=cos_lat[A_tail_idx]*cos_lon[A_tail_idx];
A_tail_y=cos_lat[A_tail_idx]*sin_lon[A_tail_idx];
A_tail_z=sin_lat[A_tail_idx];
A_head_x=B_tail_x=R_x=cos_lat[A_head_idx]*cos_lon[A_head_idx];
A_head_y=B_tail_y=R_y=cos_lat[A_head_idx]*sin_lon[A_head_idx];
A_head_z=B_tail_z=R_z=sin_lat[A_head_idx];
B_head_x=cos_lat[B_head_idx]*cos_lon[B_head_idx];
B_head_y=cos_lat[B_head_idx]*sin_lon[B_head_idx];
B_head_z=sin_lat[B_head_idx];
A_x=A_head_x-A_tail_x;
A_y=A_head_y-A_tail_y;
A_z=A_head_z-A_tail_z;
B_x=B_head_x-B_tail_x;
B_y=B_head_y-B_tail_y;
B_z=B_head_z-B_tail_z;
/* Cross-Product C = A x B */
C_x=A_y*B_z-B_y*A_z;
C_y=-A_x*B_z+B_x*A_z;
C_z=A_x*B_y-B_x*A_y;
/* Dot-Product R dot C */
dot_prd=C_x*R_x+C_y*R_y+C_z*R_z;
/* Sign of triple product R . (A x B) determines local orientation of this corner triplet */
if(dot_prd > 0.0) flg_ccw=True; else flg_ccw=False;
if(flg_ccw && crn_nbr == 4 && rcr_lvl == 1){
/* Original ABC is CCW, now check CDA */
idx_ccw=2;
flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1);
if(!flg_ccw){
if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports triangle ABC is and CDA is not CCW in quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Setting D:=A to triangularize quadrilateral.\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd);
/* Triangularize quadrilateral D:=A */
/* 20210411: From 2016 until today, nco_ccw_chk() overwrote fourth (UL) with first (LL) corner right here even when flg_ccw was True :( */
crn_lat[3]=crn_lat[0];
crn_lon[3]=crn_lon[0];
return True;
} /* !flg_ccw */
}else if(!flg_ccw && crn_nbr == 4 && rcr_lvl == 1){
/* Original ABC is not CCW
20160124: Simplistic fix: reverse gridpoint order
This only works for quadrilaterals without degenerate points */
double crn_tmp;
if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: INFO %s reports triangle ABC is non-CCW in quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Mirror-imaging...\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd);
/* Mirror-image the quadrilateral by swapping vertices B (index 1) and D (index 3) */
crn_tmp=crn_lat[1];
crn_lat[1]=crn_lat[3];
crn_lat[3]=crn_tmp;
crn_tmp=crn_lon[1];
crn_lon[1]=crn_lon[3];
crn_lon[3]=crn_tmp;
/* Check new triangle ABC */
idx_ccw=0;
flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1);
if(flg_ccw){
/* Inverted ABC is CCW, now check CDA */
idx_ccw=2;
flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1);
if(flg_ccw){
return True;
}else{
if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: INFO %s reports triangle ABC is CCW after inversion, but triangle CDA is not at quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Setting D:=A to triangularize quadrilateral.\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd);
/* Triangularize quadrilateral D:=A */
crn_lat[3]=crn_lat[0];
crn_lon[3]=crn_lon[0];
return True;
} /* flg_ccw */
}else{
/* Original and Inverted ABC are not CCW */
if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports triangle ABC remains non-CCW after first inversion\n",nco_prg_nm_get(),fnc_nm);
return False;
} /* !flg_ccw */
} /* flg_ccw */
/* Reached directly by recursive calls (rcr_lvl > 1), which report the raw orientation of one triangle */
return flg_ccw;
} /* !nco_ccw_chk() */
|
GB_unaryop__abs_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp32_fp32
// op(A') function: GB_tran__abs_fp32_fp32
// C type: float
// A type: float
// cast: float cij = (float) aij
// unaryop: cij = fabsf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabsf (x) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = fabsf (Ax [p]) for 0 <= p < anz (expansion of GB_CAST_OP / GB_OP
// defined above). Iterations are independent, split statically across
// nthreads OpenMP threads. Returns GrB_NO_VALUE when this operator/type
// combination is compiled out via GB_DISABLE.
GrB_Info GB_unop__abs_fp32_fp32
(
float *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = fabsf (A'): transpose A and apply the unary operator. All work is done
// by the included GB_unaryop_transpose.c template (not visible here), which
// expands the GB_* macros defined above; GB_PHASE_2_OF_2 selects its
// numerical phase.
GrB_Info GB_tran__abs_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__pow_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint16)
// C=scalar+B GB (_bind1st__pow_uint16)
// C=scalar+B' GB (_bind1st_tran__pow_uint16)
// C=A+scalar GB (_bind2nd__pow_uint16)
// C=A'+scalar GB (_bind2nd_tran__pow_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = GB_pow_uint16 (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow_uint16 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_UINT16 || GxB_NO_POW_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the included template applies
// the GB_BINOP macro (z = GB_pow_uint16 (x,y)) entrywise.
// NOTE(review): unlike its siblings, this function has no GB_DISABLE guard
// or GrB_Info return -- presumably by generator design; confirm against
// Generator/ sources before relying on it.
void GB (_Cdense_ewise3_noaccum__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; accumulation logic lives in the
// included GB_dense_subassign_23_template.c, driven by the B_ek_slicing task
// partition. Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumB__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b for scalar b, accumulated into the dense matrix C via the included
// GB_dense_subassign_22_template.c. The scalar arrives type-erased in
// p_bwork and is unpacked as uint16_t.
// NOTE(review): the second "return (GrB_SUCCESS)" is unreachable (the inner
// block already returns) -- an artifact of code generation, harmless.
GrB_Info GB (_Cdense_accumb__pow_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M / !M) via the included
// GB_add_template.c. When is_eWiseUnion is true, the type-erased
// alpha/beta scalars are unpacked as uint16_t for entries present in only
// one operand; otherwise they are left uninitialized and (presumably) unread
// by the template -- confirm in GB_add_template.c. Workspaces declared with
// GB_WERK_DECLARE are released by GB_FREE_WORKSPACE.
GrB_Info GB (_AaddB__pow_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is sparse or
// hypersparse; all logic lives in the included GB_emult_08_meta.c, driven by
// the precomputed TaskList partition.
GrB_Info GB (_AemultB_08__pow_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 1 for pow (see macro above): pow is not
// commutative and has no dedicated flipped opcode, so when the caller
// requests flipxy the template is instantiated with GB_FLIPPED 1 to compute
// fmult(y,x) instead of fmult(x,y).
GrB_Info GB (_AemultB_02__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; work is done by the included GB_emult_04_template.c
// driven by the M_ek_slicing partition of the mask.
GrB_Info GB (_AemultB_04__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where the result C is bitmap;
// all logic lives in the included GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__pow_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Bind-first apply: Cx [p] = GB_pow_uint16 (x, Bx [p]) for every entry
// present per the bitmap Bb (GBB skips absent entries; GBX reads Bx,
// non-iso here). The type-erased scalar x is unpacked as uint16_t.
// Cx and Bx may be aliased (see parameter comment).
GrB_Info GB (_bind1st__pow_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_pow_uint16 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Bind-second apply: Cx [p] = GB_pow_uint16 (Ax [p], y) for every entry
// present per the bitmap Ab (GBB skips absent entries; GBX reads Ax,
// non-iso here). The type-erased scalar y is unpacked as uint16_t.
// Cx and Ax may be aliased (see parameter comment).
GrB_Info GB (_bind2nd__pow_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_pow_uint16 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes cij = pow (x, aij):
// x is the bound scalar, aij comes from A (no typecast despite the name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_uint16 (x, aij) ; \
}
// C = pow (x, A'): transpose A and apply the operator with the scalar bound
// to the first argument; work is done by the included GB_unop_transpose.c.
// NOTE(review): the #undef/#define after the #endif is unreachable generator
// boilerplate (both #if branches return first) -- harmless, leave as-is.
GrB_Info GB (_bind1st_tran__pow_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes cij = pow (aij, y):
// aij comes from A, y is the bound scalar (no typecast despite the name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_uint16 (aij, y) ; \
}
// C = pow (A', y): transpose A and apply the operator with the scalar bound
// to the second argument; work is done by the included GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__ge_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ge_int32
// A.*B function (eWiseMult): GB_AemultB__ge_int32
// A*D function (colscale): GB_AxD__ge_int32
// D*A function (rowscale): GB_DxB__ge_int32
// C+=B function (dense accum): GB_Cdense_accumB__ge_int32
// C+=b function (dense accum): GB_Cdense_accumb__ge_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_int32
// C=scalar+B GB_bind1st__ge_int32
// C=scalar+B' GB_bind1st_tran__ge_int32
// C=A+scalar GB_bind2nd__ge_int32
// C=A'+scalar GB_bind2nd_tran__ge_int32
// C type: bool
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_INT32 || GxB_NO_GE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the included template applies
// the GB_BINOP macro (z = (x >= y)) entrywise.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_Cdense_ewise3_noaccum__ge_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B dense-accumulate stub: the template include is compiled out by
// "#if 0", so this function is a no-op returning GrB_SUCCESS. Presumably the
// generator disables dense accumulation for GE because C's type (bool)
// differs from A/B (int32_t) -- confirm against the Generator/ sources.
GrB_Info GB_Cdense_accumB__ge_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar) dense-accumulate stub: like GB_Cdense_accumB__ge_int32,
// the body is compiled out by "#if 0" and the function is a no-op returning
// GrB_SUCCESS -- presumably because the accumulated type (bool) differs from
// the operand type (int32_t); confirm against the Generator/ sources.
GrB_Info GB_Cdense_accumb__ge_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by diagonal matrix D: the included
// GB_AxB_colscale_meta.c applies cij = (aij >= djj). C holds bool results
// (the GE operator's output type), hence the bool cast of C->x.
GrB_Info GB_AxD__ge_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by diagonal matrix D: the included
// GB_AxB_rowscale_meta.c applies cij = (dii >= bij). C holds bool results
// (the GE operator's output type), hence the bool cast of C->x.
GrB_Info GB_DxB__ge_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the GE_INT32 operator; the task-based
// numeric phase is supplied by the included add template.
GrB_Info GB_AaddB__ge_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the GE_INT32 operator; the
// numeric phase is supplied by the included emult template.
GrB_Info GB_AemultB__ge_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = (x >= bij) to every entry of B, with the scalar x bound as the
// first operand.  Cx and Bx may alias; the loop is embarrassingly parallel.
GrB_Info GB_bind1st__ge_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    const int32_t *Bx = (const int32_t *) Bx_input ;
    const int32_t x = (*((const int32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (x >= Bx [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = (aij >= y) to every entry of A, with the scalar y bound as the
// second operand.  Cx and Ax may alias; the loop is embarrassingly parallel.
GrB_Info GB_bind2nd__ge_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    const int32_t *Ax = (const int32_t *) Ax_input ;
    const int32_t y = (*((const int32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p] >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
// Transpose A and apply z = (x >= aij); the traversal is supplied by the
// included GB_unop_transpose.c template, driven by the macros defined above.
GrB_Info GB_bind1st_tran__ge_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this generated function
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
// Transpose A and apply z = (aij >= y); the traversal is supplied by the
// included GB_unop_transpose.c template, driven by the macro defined above.
GrB_Info GB_bind2nd_tran__ge_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__lxor_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint32)
// A*D function (colscale): GB (_AxD__lxor_uint32)
// D*A function (rowscale): GB (_DxB__lxor_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint32)
// C=scalar+B GB (_bind1st__lxor_uint32)
// C=scalar+B' GB (_bind1st_tran__lxor_uint32)
// C=A+scalar GB (_bind2nd__lxor_uint32)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT32 || GxB_NO_LXOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Compiled out (#if 0): LXOR is not one of the operators this kernel
// supports, so no accumulating dense ewise3 function is generated.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense, using the LXOR_UINT32 operator;
// the loop is supplied by the included template.
void GB (_Cdense_ewise3_noaccum__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix with LXOR_UINT32;
// the parallel subassign loop is supplied by the included template.
GrB_Info GB (_Cdense_accumB__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix with LXOR_UINT32;
// the parallel subassign loop is supplied by the included template.
GrB_Info GB (_Cdense_accumb__lxor_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the inner block above always returns (generated-code artifact)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with a diagonal D matrix, LXOR_UINT32 operator;
// the loop is supplied by the included colscale template.
GrB_Info GB (_AxD__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with a diagonal D matrix, LXOR_UINT32 operator;
// the loop is supplied by the included rowscale template.
GrB_Info GB (_DxB__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with LXOR_UINT32.  For
// GxB_eWiseUnion the alpha/beta scalars replace missing entries; the
// numeric phase is supplied by the included add template.
GrB_Info GB (_AaddB__lxor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
// the scalars are only read by the template when is_eWiseUnion is true
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper;
// the numeric phase is supplied by the included emult meta template.
GrB_Info GB (_AemultB_08__lxor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for LXOR (commutative), so only the
// unflipped template instantiation is compiled.
GrB_Info GB (_AemultB_02__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; the loop is supplied by the included template.
GrB_Info GB (_AemultB_04__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) where C is bitmap; the loop is
// supplied by the included bitmap emult template.
GrB_Info GB (_AemultB_bitmap__lxor_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = lxor (x, bij) to every present entry of B, with the scalar x
// bound as the first operand.  Bb is B's bitmap (may be NULL for full
// matrices, per GBB); Cx and Bx may alias.
GrB_Info GB (_bind1st__lxor_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in B's bitmap are computed
        if (GBB (Bb, p))
        {
            uint32_t bij = GBX (Bx, p, false) ;
            Cx [p] = ((x != 0) != (bij != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = lxor (aij, y) to every present entry of A, with the scalar y
// bound as the second operand.  Ab is A's bitmap (may be NULL for full
// matrices, per GBB); Cx and Ax may alias.
GrB_Info GB (_bind2nd__lxor_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in A's bitmap are computed
        if (GBB (Ab, p))
        {
            uint32_t aij = GBX (Ax, p, false) ;
            Cx [p] = ((aij != 0) != (y != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
// Transpose A and apply z = lxor (x, aij); the traversal is supplied by the
// included GB_unop_transpose.c template, driven by the macros defined above.
GrB_Info GB (_bind1st_tran__lxor_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this generated function
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// Transpose A and apply z = lxor (aij, y); the traversal is supplied by the
// included GB_unop_transpose.c template, driven by the macro defined above.
GrB_Info GB (_bind2nd_tran__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hmacSHA512_fmt_plug.c | /*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* Based on hmac-md5 by Bartavelle
*
* SIMD added Feb, 2015, JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_hmacSHA512;
extern struct fmt_main fmt_hmacSHA384;
#elif FMT_REGISTERS_H
john_register_one(&fmt_hmacSHA512);
john_register_one(&fmt_hmacSHA384);
#else
#include "sha2.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "base64_convert.h"
#include "formats.h"
#include "aligned.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 1024 // scaled on core i7-quad HT
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 512 // scaled K8-dual HT
#endif
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "HMAC-SHA512"
#define FORMAT_LABEL_384 "HMAC-SHA384"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "password is key, SHA512 " SHA512_ALGORITHM_NAME
#define ALGORITHM_NAME_384 "password is key, SHA384 " SHA512_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define PAD_SIZE 128
#define PAD_SIZE_W (PAD_SIZE/8)
#define BINARY_SIZE (512/8)
#define BINARY_SIZE_384 (384/8)
#define BINARY_ALIGN 8
#ifndef SIMD_COEF_64
#define SALT_LENGTH 1023
#define SALT_ALIGN 1
#else
#define SALT_LIMBS 2 /* 2 limbs, 239 bytes */
#define SALT_LENGTH (SALT_LIMBS * PAD_SIZE - 17)
#define SALT_ALIGN MEM_ALIGN_SIMD
#endif
#define CIPHERTEXT_LENGTH (SALT_LENGTH + 1 + BINARY_SIZE * 2)
#define CIPHERTEXT_LENGTH_384 (SALT_LENGTH + 1 + BINARY_SIZE_384 * 2)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#if ARCH_LITTLE_ENDIAN==1
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i&127)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i&127)&7)) + index/SIMD_COEF_64 * PAD_SIZE * SIMD_COEF_64 )
#else
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i&127)&(0xffffffff-7))*SIMD_COEF_64 + ((i&127)&7) + index/SIMD_COEF_64 * PAD_SIZE * SIMD_COEF_64 )
#endif
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests tests[] = {
{"what do ya want for nothing?#164b7a7bfcf819e2e395fbe73b56e0a387bd64222e831fd610270cd7ea2505549758bf75c05a994a6d034f65f8f0e6fdcaeab1a34d4a6b4b636e070a38bce737", "Jefe"},
{"Reference hashes are keys to success#73a5eff716d0147a440fdf5aff187c52deab8c4dc55073be3d5742e788a99fd6b53a5894725f0f88f3486b5bb63d2af930a0cf6267af572128273daf8eee4cfa", "The magnum"},
{"Beppe#Grillo#AB08C46822313481D548412A084F08C7CA3BBF8A98D901D14698759F4C36ADB07528348D56CAF4F6AF654E14FC102FF10DCF50794A82544426386C7BE238CEAF", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."},
{"hjXNSoAhq2YLm2vSFtc7BCJNUS9RNPcl#1c10f4d7237b034f27e7af81705e6cb0acecac395086e81e55a391a12b60b49e375b2de39c94f4989a50604754ffeea0b379ae1d4cc6b3550cd0a24a582ef351", "1"},
{"JkbHdY2Biype3gv2TpG2Wnv68OF7p6cl#a1f6e131e2fe1f728c5f2b8d0d8af9a6e202868ab9abef0e8f9126a712a4ae7f10533bbdedb710f6a521302c48a743caab1715aa85c4a57fbd51fde5e07945d9", "22"},
{"X4eOvWZw1b9L1NiND4vQxutubtrGhzNe#5a6002cedb05b97ce13393acab09767005a611dfc3e306305772c614ff4869077b3080f23694d3efc6d1998b4514fe8316389edb5f61dbcea8bd3b4d01595ae1", "333"},
{"VYG7HeRZLyie5jdzDRaqfd0yYX8PFstX#dd2b8b8a97c56af68fef5e73bf1eceec0c951084f97b66196b32758ed8b34a8d2f0e10663acac662e393fd42c0043e4cedf0d3c617ed43ba61b0297353fc2e2a", "4444"},
{"x8nIFPPTMJMEZLMSELpEub6bQjQzyjkq#fb92efe7d0abff004c8dc94c64356536df65dd42c323da1de4c583c255135b1a15002efc0b794683e7ac4ea7e7ae3813fb132b43c86a6951059a1574908987fb", "55555"},
{"Hr8KfafSSsEJfp5HZRLVAGQFrEPTDiSi#752e874177fc0f31149ebc699c32b2f7f600ad4d28f1fc27eb715a328100e6e67ff2845b20acd9ebc4befc7a629f1bd9a5b96abf981dcaba71317dcbb8cfdfba", "666666"},
{"UH0LvhZUihMMECAW0Ummw2OSgAOzV0i9#de3d4986007b1f45542f1d38d294ac69a0e23e2985103082a6ee134d4c786cfcb61d90be72388280e119e047bab32e68c6615d45d21895e5b8ef2b7eaf7258fd", "7777777"},
{"hX4OqAvhCjwEPwsi9I7SlIQbmlDb6LDh#cbf4fbb0721c9ec00af347d78046c314087efcbce47ef732e119433dc6f7fe3d2788e0a20d76bd2b1f9b199c9914eeaee0a51a2fb88cfbb7472b538e45b53711", "88888888"},
{"gOONPyTnQVKWMvh61x8Y1JGlDalKCBAE#9d4d34c76cb2a4cbecb8929be61dd4af5088a055bd338cd245311786c4119a5b526b72646626fff1cb4931eb0fe05d8a7648a66f0db1f2522b8af1cfc2ac8e74", "999999999"},
{"F3WBOJKUyVWbnqtGZ2ur8uW0nqIBpObK#6043dd6dd3dd96699db8351b0db762af27a5db06169ec6668e9f464fcc3fdf1d7deafaccb67e5ef7f5ee96b2a5efad33a8af20eb19fe60d8b20e7994c76a0610", "0000000000"},
{"pfZzfOSVpQvuILYEIAeCT8Xnj7eQnR2w#ff80da7bbcdb11fd8bb282a80603ed34847d897701fd547d06f4438072ecd43058a3b7c0b3a296f7c5dbbf06beb3825d1eb7122f01ad78ef2afc5ab09c46ca45", "11111111111"},
/* mockup JWT hash */
{"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjEyMzQ1Njc4OTAsIm5hbWUiOiJKb2huIERvZSIsImFkbWluIjp0cnVlfQ.r7FDU+ahrbW0Wtsekh5UNqV2iyXGrQQaRZjdc8i733QIoTSIQM//FSGjP151C2ijvNUVo5syWOW+RpZc7khU1g", "magnum"},
{NULL}
};
static struct fmt_tests tests_384[] = {
{"what do ya want for nothing?#af45d2e376484031617f78d2b58a6b1b9c7ef464f5a01b47e42ec3736322445e8e2240ca5e69e2c78b3239ecfab21649", "Jefe"},
{"Beppe#Grillo#8361922C63506E53714F8A8491C6621A76CF0FD6DFEAD91BF59B420A23DFF2745C0A0D5E142D4F937E714EA8C228835B", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."},
/* mockup JWT hash */
{"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjEyMzQ1Njc4OTAsIm5hbWUiOiJKb2huIERvZSIsImFkbWluIjp0cnVlfQ.WNzjJCdDCTV3hLfsRy//hny9VzlaZXHFvoKSJXB5/rbKkXwE1Jve/DUirW7r5ztm", "magnum"},
{NULL}
};
#ifdef SIMD_COEF_64
static unsigned char *crypt_key;
static unsigned char *ipad, *prep_ipad;
static unsigned char *opad, *prep_opad;
typedef struct cur_salt_t {
unsigned char salt[SALT_LIMBS][PAD_SIZE * MAX_KEYS_PER_CRYPT];
int salt_len;
} cur_salt_t;
static cur_salt_t *cur_salt;
static int bufsize;
#define SALT_SIZE sizeof(cur_salt_t)
#else
static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)];
static unsigned char (*opad)[PAD_SIZE];
static unsigned char (*ipad)[PAD_SIZE];
static unsigned char cur_salt[SALT_LENGTH+1];
static SHA512_CTX *ipad_ctx;
static SHA512_CTX *opad_ctx;
#define SALT_SIZE sizeof(cur_salt)
#endif
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int new_keys;
#ifdef SIMD_COEF_64
/*
 * Reset the interleaved SIMD pad buffers to the HMAC constants: every
 * opad byte to 0x5c and every ipad byte to 0x36, ready for set_key()
 * to XOR the next batch of candidate keys into them.
 */
static void clear_keys(void)
{
	memset(opad, 0x5C, bufsize);
	memset(ipad, 0x36, bufsize);
}
#endif
/*
 * Common format initialization.  B_LEN selects the digest size
 * (BINARY_SIZE for SHA-512, BINARY_SIZE_384 for SHA-384).  Allocates the
 * pad/key buffers sized by max_keys_per_crypt (scaled up for OpenMP) and,
 * for SIMD builds, pre-stamps the 0x80 padding byte and the bit-length
 * word into each interleaved crypt_key lane.
 */
static void init(struct fmt_main *self, const int B_LEN)
{
#ifdef SIMD_COEF_64
int i;
#endif
#ifdef _OPENMP
// scale key batch size by thread count times OMP_SCALE
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_64
// opad is unsigned char*, so sizeof(*opad)==1: bufsize = keys * PAD_SIZE
bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * PAD_SIZE;
crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt,
BINARY_SIZE, MEM_ALIGN_SIMD);
prep_opad = mem_calloc_align(self->params.max_keys_per_crypt,
BINARY_SIZE, MEM_ALIGN_SIMD);
// fix the SHA padding: 0x80 terminator after the digest, and the total
// message bit length ((B_LEN + PAD_SIZE) << 3) in the last 64-bit word
for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
crypt_key[GETPOS(B_LEN, i)] = 0x80;
((uint64_t*)crypt_key)[15 * SIMD_COEF_64 + (i&(SIMD_COEF_64-1)) + (i/SIMD_COEF_64) * PAD_SIZE_W * SIMD_COEF_64] = (B_LEN + PAD_SIZE) << 3;
}
clear_keys();
#else
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
ipad = mem_calloc(sizeof(*ipad), self->params.max_keys_per_crypt);
opad = mem_calloc(sizeof(*opad), self->params.max_keys_per_crypt);
// NOTE(review): sizeof(*opad_ctx) is used for ipad_ctx too; harmless since
// both are SHA512_CTX, but looks like a copy/paste quirk
ipad_ctx = mem_calloc_align(self->params.max_keys_per_crypt,
sizeof(*opad_ctx), 8);
opad_ctx = mem_calloc_align(self->params.max_keys_per_crypt,
sizeof(*opad_ctx), 8);
#endif
saved_plain = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_plain));
}
/* fmt_main init hook for the HMAC-SHA512 variant (64-byte digests). */
static void init_512(struct fmt_main *self)
{
	init(self, BINARY_SIZE);
}
/* fmt_main init hook for the HMAC-SHA384 variant (48-byte digests). */
static void init_384(struct fmt_main *self)
{
	init(self, BINARY_SIZE_384);
}
/*
 * Release every buffer allocated by init().  MEM_FREE also NULLs the
 * pointer, so a double call is safe.
 */
static void done(void)
{
MEM_FREE(saved_plain);
#ifdef SIMD_COEF_64
MEM_FREE(prep_opad);
MEM_FREE(prep_ipad);
#else
MEM_FREE(opad_ctx);
MEM_FREE(ipad_ctx);
#endif
MEM_FREE(opad);
MEM_FREE(ipad);
MEM_FREE(crypt_key);
}
/*
 * Canonicalize a ciphertext.  JWT-style "header.payload.signature" inputs
 * (no '#', at least two '.') are rewritten into the native "salt#hexdigest"
 * form; the hex digest is then lower-cased.  B_LEN is the digest size in
 * bytes and CT_LEN the maximum ciphertext length for the current variant.
 * Returns a pointer into a static buffer (standard JtR split() convention),
 * or the unmodified input when no conversion applies.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self, const int B_LEN, const int CT_LEN)
{
	static char out[CIPHERTEXT_LENGTH + 1];
	char *hash;

	if (strstr(ciphertext, "$SOURCE_HASH$"))
		return ciphertext;
	if (!strchr(ciphertext, '#') && strchr(ciphertext, '.') &&
	    strchr(ciphertext, '.') != strrchr(ciphertext, '.')) {
		// Treat this like a JWT hash. Convert into 'normal' hmac-sha512 format.
		char buf[BINARY_SIZE * 2 + 1], tmp[CIPHERTEXT_LENGTH + 1], *cpi;
		strnzcpy(tmp, ciphertext, sizeof(tmp));
		cpi = strchr(tmp, '.');
		cpi = strchr(&cpi[1], '.');
		// reject if salt + '#' + hex digest would exceed the variant's limit
		if (cpi - tmp + B_LEN * 2 + 1 > CT_LEN)
			return ciphertext;
		*cpi++ = 0;
		memset(buf, 0, sizeof(buf));
		base64_convert(cpi, e_b64_mime, strlen(cpi), buf, e_b64_hex,
		               sizeof(buf), flg_Base64_NO_FLAGS, 0);
		if (strlen(buf) != B_LEN * 2)
			return ciphertext;
		sprintf(out, "%s#%s", tmp, buf);
	} else
		strnzcpy(out, ciphertext, sizeof(out));
	/* Bug fix: guard against a missing '#' — strlwr(NULL) would crash. */
	hash = strrchr(out, '#');
	if (hash)
		strlwr(hash);
	return out;
}
/* split() wrapper for the HMAC-SHA512 variant. */
static char *split_512(char *ciphertext, int index, struct fmt_main *self)
{
	return split(ciphertext, index, self, BINARY_SIZE, CIPHERTEXT_LENGTH);
}
/* split() wrapper for the HMAC-SHA384 variant. */
static char *split_384(char *ciphertext, int index, struct fmt_main *self)
{
	return split(ciphertext, index, self, BINARY_SIZE_384, CIPHERTEXT_LENGTH_384);
}
/*
 * Validate a ciphertext: "salt#hexdigest" with a salt no longer than
 * SALT_LENGTH and exactly B_LEN*2 hex digits after the last '#'.
 * JWT-looking inputs (no '#', two or more '.') are canonicalized via
 * split() first.  Returns 1 if valid, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self, const int B_LEN, const int CT_LEN)
{
	char *p;
	int salt_len, i;

	p = strrchr(ciphertext, '#'); // allow # in salt
	if (!p && strchr(ciphertext, '.') &&
	    strchr(ciphertext, '.') != strrchr(ciphertext, '.')) {
		/* Looks like a JWT token: canonicalize, then re-locate '#'. */
		if (strlen(ciphertext) > CT_LEN)
			return 0;
		ciphertext = split(ciphertext, 0, self, B_LEN, CT_LEN);
		p = strrchr(ciphertext, '#');
	}
	if (!p || p > &ciphertext[strlen(ciphertext) - 1])
		return 0;
	salt_len = (int)(p - ciphertext);
	if (salt_len > SALT_LENGTH)
		return 0;
	/* everything after '#' must be exactly B_LEN*2 hex digits */
	if (strlen(&ciphertext[salt_len + 1]) != B_LEN * 2)
		return 0;
	for (i = salt_len + 1; ciphertext[i]; i++) {
		char c = ciphertext[i];
		if (!((c >= '0' && c <= '9') ||
		      (c >= 'a' && c <= 'f') ||
		      (c >= 'A' && c <= 'F')))
			return 0;
	}
	return 1;
}
/* valid() wrapper for the HMAC-SHA512 variant. */
static int valid_512(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, BINARY_SIZE, CIPHERTEXT_LENGTH);
}
/* valid() wrapper for the HMAC-SHA384 variant. */
static int valid_384(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, BINARY_SIZE_384, CIPHERTEXT_LENGTH_384);
}
/*
 * Install the current salt for crypt_all().  SIMD builds keep a pointer to
 * the interleaved cur_salt_t; scalar builds copy the NUL-terminated salt
 * string into the private cur_salt buffer.
 */
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_64
cur_salt = salt;
#else
strcpy((char*)cur_salt, (char*)salt);
#endif
}
/*
 * Store a candidate key and XOR it into the HMAC ipad/opad buffers.
 * B_LEN selects SHA-512 vs SHA-384 for keys longer than the block size
 * (they are first hashed down, per HMAC).  SIMD builds XOR into the
 * interleaved lane for `index`, 8 bytes at a time, with byte-swapping on
 * little-endian hosts; the loop stops at the key's terminating NUL and
 * handles the 1..7 trailing bytes via narrower partial XORs.
 */
static MAYBE_INLINE void set_key(char *key, int index, const int B_LEN)
{
int len;
#ifdef SIMD_COEF_64
// lane pointers into the interleaved pads; on LE the first 8 key bytes
// land at GETPOS(7,..) because bytes are stored most-significant first
#if ARCH_LITTLE_ENDIAN==1
uint64_t *ipadp = (uint64_t*)&ipad[GETPOS(7, index)];
uint64_t *opadp = (uint64_t*)&opad[GETPOS(7, index)];
#else
uint64_t *ipadp = (uint64_t*)&ipad[GETPOS(0, index)];
uint64_t *opadp = (uint64_t*)&opad[GETPOS(0, index)];
#endif
const uint64_t *keyp = (uint64_t*)key;
uint64_t temp;
len = strlen(key);
memcpy(saved_plain[index], key, len);
saved_plain[index][len] = 0;
// HMAC: keys longer than the block size are replaced by their digest
if (len > PAD_SIZE) {
unsigned char k0[BINARY_SIZE];
SHA512_CTX ctx;
int i;
if (B_LEN == BINARY_SIZE) {
SHA512_Init(&ctx);
SHA512_Update(&ctx, key, len);
SHA512_Final(k0, &ctx);
} else {
SHA384_Init(&ctx);
SHA384_Update(&ctx, key, len);
SHA384_Final(k0, &ctx);
}
keyp = (uint64_t*)k0;
// XOR the whole digest into both pads, one 64-bit word per iteration
for (i = 0; i < B_LEN / 8; i++, ipadp += SIMD_COEF_64, opadp += SIMD_COEF_64)
{
#if ARCH_LITTLE_ENDIAN==1
temp = JOHNSWAP64(*keyp++);
#else
temp = *keyp++;
#endif
*ipadp ^= temp;
*opadp ^= temp;
}
}
else
// short key: XOR 8 bytes at a time until the NUL terminator; the three
// guarded cases below XOR only the valid 2/4/6 leading bytes of the
// final partial word (a zero byte marks end of key)
#if ARCH_LITTLE_ENDIAN==1
while(((temp = JOHNSWAP64(*keyp++)) & 0xff00000000000000ULL)) {
if (!(temp & 0x00ff000000000000ULL) || !(temp & 0x0000ff0000000000ULL))
{
((unsigned short*)ipadp)[3] ^=
(unsigned short)(temp >> 48);
((unsigned short*)opadp)[3] ^=
(unsigned short)(temp >> 48);
break;
}
if (!(temp & 0x00ff00000000ULL) || !(temp & 0x0000ff000000ULL))
{
((uint32_t*)ipadp)[1] ^=
(uint32_t)(temp >> 32);
((uint32_t*)opadp)[1] ^=
(uint32_t)(temp >> 32);
break;
}
if (!(temp & 0x00ff0000) || !(temp & 0x0000ff00))
{
((uint32_t*)ipadp)[1] ^=
(uint32_t)(temp >> 32);
((uint32_t*)opadp)[1] ^=
(uint32_t)(temp >> 32);
((unsigned short*)ipadp)[1] ^=
(unsigned short)(temp >> 16);
((unsigned short*)opadp)[1] ^=
(unsigned short)(temp >> 16);
break;
}
*ipadp ^= temp;
*opadp ^= temp;
if (!(temp & 0xff))
break;
ipadp += SIMD_COEF_64;
opadp += SIMD_COEF_64;
}
#else
// big-endian variant: same partial-word logic, different sub-indices
while(((temp = *keyp++) & 0xff00000000000000ULL)) {
if (!(temp & 0x00ff000000000000ULL) || !(temp & 0x0000ff0000000000ULL))
{
((unsigned short*)ipadp)[0] ^=
(unsigned short)(temp >> 48);
((unsigned short*)opadp)[0] ^=
(unsigned short)(temp >> 48);
break;
}
if (!(temp & 0x00ff00000000ULL) || !(temp & 0x0000ff000000ULL))
{
((uint32_t*)ipadp)[0] ^=
(uint32_t)(temp >> 32);
((uint32_t*)opadp)[0] ^=
(uint32_t)(temp >> 32);
break;
}
if (!(temp & 0x00ff0000) || !(temp & 0x0000ff00))
{
((uint32_t*)ipadp)[0] ^=
(uint32_t)(temp >> 32);
((uint32_t*)opadp)[0] ^=
(uint32_t)(temp >> 32);
((unsigned short*)ipadp)[2] ^=
(unsigned short)(temp >> 16);
((unsigned short*)opadp)[2] ^=
(unsigned short)(temp >> 16);
break;
}
*ipadp ^= temp;
*opadp ^= temp;
if (!(temp & 0xff))
break;
ipadp += SIMD_COEF_64;
opadp += SIMD_COEF_64;
}
#endif
#else
// scalar build: rebuild the pads from the constants, then XOR the key
// (or its digest, when longer than the block size) byte by byte
int i;
len = strlen(key);
memcpy(saved_plain[index], key, len);
saved_plain[index][len] = 0;
memset(ipad[index], 0x36, PAD_SIZE);
memset(opad[index], 0x5C, PAD_SIZE);
if (len > PAD_SIZE) {
SHA512_CTX ctx;
unsigned char k0[BINARY_SIZE];
if (B_LEN == BINARY_SIZE) {
SHA512_Init( &ctx );
SHA512_Update( &ctx, key, len);
SHA512_Final( k0, &ctx);
} else {
SHA384_Init( &ctx );
SHA384_Update( &ctx, key, len);
SHA384_Final( k0, &ctx);
}
len = B_LEN;
for (i=0;i<len;i++)
{
ipad[index][i] ^= k0[i];
opad[index][i] ^= k0[i];
}
}
else
for (i=0;i<len;i++)
{
ipad[index][i] ^= key[i];
opad[index][i] ^= key[i];
}
#endif
// tell crypt_all() the ipad/opad prefixes must be re-hashed
new_keys = 1;
}
/* Format entry point: queue a candidate key for HMAC-SHA-512 (full 64-byte binary). */
static void set_key_512(char *key, int index)
{
	set_key(key, index, BINARY_SIZE);
}
/* Format entry point: queue a candidate key for HMAC-SHA-384 (48-byte binary). */
static void set_key_384(char *key, int index)
{
	set_key(key, index, BINARY_SIZE_384);
}
/* Return the saved plaintext for the given candidate slot. */
static char *get_key(int index)
{
	return saved_plain[index];
}
/*
 * cmp_all: fast screen of one binary hash against every computed crypt.
 * Only the leading word of each digest is compared (64 bits in the SIMD
 * build, 32 bits in the scalar build); cmp_one() performs the full-length
 * comparison afterwards.
 */
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_64
unsigned int index;
for (index = 0; index < count; index++) {
// NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_64)
if (((uint64_t*)binary)[0] == ((uint64_t*)crypt_key)[(index&(SIMD_COEF_64-1))+index/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64])
return 1;
}
return 0;
#else
int index = 0;
/* with a single key per crypt there is nothing to loop over */
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
for (; index < count; index++)
#endif
if (((uint32_t*)binary)[0] == crypt_key[index][0])
return 1;
return 0;
#endif
}
/*
 * cmp_one: full-length comparison of one binary against crypt slot `index`.
 * B_LEN is the digest length in bytes (64 for SHA-512, 48 for SHA-384),
 * walked 64 bits at a time through the interleaved SIMD buffer layout.
 */
static int cmp_one(void *binary, int index, int B_LEN)
{
#ifdef SIMD_COEF_64
int i;
for (i = 0; i < (B_LEN/8); i++)
// NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_64)
if (((uint64_t*)binary)[i] != ((uint64_t*)crypt_key)[i * SIMD_COEF_64 + (index & (SIMD_COEF_64-1)) + (index/SIMD_COEF_64) * PAD_SIZE_W * SIMD_COEF_64])
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], B_LEN);
#endif
}
/* SHA-512 flavor of cmp_one: compare the full 64-byte digest. */
static int cmp_one_512(void *binary, int index)
{
	return cmp_one(binary, index, BINARY_SIZE);
}
/* SHA-384 flavor of cmp_one: compare the 48-byte digest. */
static int cmp_one_384(void *binary, int index)
{
	return cmp_one(binary, index, BINARY_SIZE_384);
}
/* Nothing further to verify: cmp_one already compared the full digest. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
 * crypt_all: compute HMAC(key, salt) for all queued candidates.
 * SIMD build: the extra parameter is EX_FLAGS (0 for SHA-512,
 * SSEi_CRYPT_SHA384 for SHA-384). Scalar build: the extra parameter is
 * B_LEN, the digest length in bytes.
 * The ipad/opad pre-hashes are recomputed only when new_keys is set.
 */
static int crypt_all(int *pcount, struct db_salt *salt,
#ifdef SIMD_COEF_64
const unsigned EX_FLAGS
#else
const int B_LEN
#endif
)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
#ifdef SIMD_COEF_64
unsigned int i;
/* refresh the precomputed first blocks H(ipad), H(opad) */
if (new_keys) {
SIMDSHA512body(&ipad[index * PAD_SIZE],
(uint64_t*)&prep_ipad[index * BINARY_SIZE],
NULL, SSEi_MIXED_IN|EX_FLAGS);
SIMDSHA512body(&opad[index * PAD_SIZE],
(uint64_t*)&prep_opad[index * BINARY_SIZE],
NULL, SSEi_MIXED_IN|EX_FLAGS);
}
/* inner hash: continue from H(ipad) over the salt blocks */
SIMDSHA512body(cur_salt->salt[0],
(uint64_t*)&crypt_key[index * PAD_SIZE],
(uint64_t*)&prep_ipad[index * BINARY_SIZE],
SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);
for (i = 1; i <= (cur_salt->salt_len + 16) / PAD_SIZE; i++)
SIMDSHA512body(cur_salt->salt[i],
(uint64_t*)&crypt_key[index * PAD_SIZE],
(uint64_t*)&crypt_key[index * PAD_SIZE],
SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);
if (EX_FLAGS) {
// NOTE, SSESHA384 will output 64 bytes. We need the first 48 (plus the 0x80 padding).
// so we are forced to 'clean' this crap up, before using the crypt as the input.
uint64_t *pclear = (uint64_t*)&crypt_key[index/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64*8];
for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) {
pclear[48/8*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64] = 0x8000000000000000ULL;
pclear[48/8*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64+SIMD_COEF_64] = 0;
}
}
/* outer hash: continue from H(opad) over the inner digest */
SIMDSHA512body(&crypt_key[index * PAD_SIZE],
(uint64_t*)&crypt_key[index * PAD_SIZE],
(uint64_t*)&prep_opad[index * BINARY_SIZE],
SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);
#else
SHA512_CTX ctx;
// Note, for oSSL, we really only need SHA512_Init and SHA384_Init. From that point
// on, SHA512_Update/SHA512_Final can be used. Also, jtr internal sha2.c file works
// like that. BUT I am not sure every hash engine works that way, so we are keeping
// the 'full' block.
if (B_LEN == BINARY_SIZE) {
if (new_keys) {
SHA512_Init(&ipad_ctx[index]);
SHA512_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
SHA512_Init(&opad_ctx[index]);
SHA512_Update(&opad_ctx[index], opad[index], PAD_SIZE);
}
memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
SHA512_Update( &ctx, cur_salt, strlen( (char*) cur_salt) );
SHA512_Final( (unsigned char*) crypt_key[index], &ctx);
memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
SHA512_Update( &ctx, crypt_key[index], B_LEN);
SHA512_Final( (unsigned char*) crypt_key[index], &ctx);
} else {
if (new_keys) {
SHA384_Init(&ipad_ctx[index]);
SHA384_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
SHA384_Init(&opad_ctx[index]);
SHA384_Update(&opad_ctx[index], opad[index], PAD_SIZE);
}
memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
SHA384_Update( &ctx, cur_salt, strlen( (char*) cur_salt) );
SHA384_Final( (unsigned char*) crypt_key[index], &ctx);
memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
SHA384_Update( &ctx, crypt_key[index], B_LEN);
SHA384_Final( (unsigned char*) crypt_key[index], &ctx);
}
#endif
}
new_keys = 0;
return count;
}
/* SHA-512 dispatcher for crypt_all (no extra SIMD flags / 64-byte digest). */
static int crypt_all_512(int *pcount, struct db_salt *salt)
{
#ifdef SIMD_COEF_64
	return crypt_all(pcount, salt, 0);
#else
	return crypt_all(pcount, salt, BINARY_SIZE);
#endif
}
/* SHA-384 dispatcher for crypt_all (SSEi_CRYPT_SHA384 flag / 48-byte digest). */
static int crypt_all_384(int *pcount, struct db_salt *salt)
{
#ifdef SIMD_COEF_64
	return crypt_all(pcount, salt, SSEi_CRYPT_SHA384);
#else
	return crypt_all(pcount, salt, BINARY_SIZE_384);
#endif
}
/*
 * get_binary: decode the hex digest that follows the LAST '#' of the
 * ciphertext into a static, aligned byte buffer.
 * B_LEN is the digest length in bytes (64 for SHA-512, 48 for SHA-384).
 * NOTE: returns a pointer to static storage, so the result is only valid
 * until the next call.
 */
static void *get_binary(char *ciphertext, const int B_LEN)
{
JTR_ALIGN(BINARY_ALIGN) static unsigned char realcipher[BINARY_SIZE];
int i,pos;
/* scan backwards for the separator so a '#' inside the salt is harmless */
for (i=strlen(ciphertext);ciphertext[i]!='#';i--); // allow # in salt
pos=i+1;
/* hex chars -> raw bytes */
for (i=0;i<B_LEN;i++)
realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+pos])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+1+pos])];
#if defined(SIMD_COEF_64) && ARCH_LITTLE_ENDIAN==1
/* the SIMD code compares 64-bit words, so match its byte order */
alter_endianity_w64(realcipher, B_LEN/8);
#endif
return (void*)realcipher;
}
/* Decode a full 64-byte SHA-512 digest from the ciphertext string. */
static void *get_binary_512(char *ciphertext)
{
	return get_binary(ciphertext, BINARY_SIZE);
}
/* Decode a 48-byte SHA-384 digest from the ciphertext string. */
static void *get_binary_384(char *ciphertext)
{
	return get_binary(ciphertext, BINARY_SIZE_384);
}
/*
 * get_salt: extract the salt (everything before the last '#') from the
 * ciphertext. Scalar build returns the raw salt string; SIMD build returns
 * a cur_salt_t with the salt replicated into every SIMD lane, the 0x80
 * SHA-2 padding byte appended, and the 128-bit length field set to
 * (salt_len + PAD_SIZE) bits because the salt is hashed after one
 * PAD_SIZE-byte ipad block.
 * NOTE: both return values point at static storage.
 */
static void *get_salt(char *ciphertext)
{
static unsigned char salt[SALT_LENGTH+1];
int len;
#ifdef SIMD_COEF_64
unsigned int i = 0;
static JTR_ALIGN(MEM_ALIGN_SIMD) cur_salt_t cur_salt;
int salt_len = 0;
#endif
// allow # in salt
len = strrchr(ciphertext, '#') - ciphertext;
memset(salt, 0, sizeof(salt));
memcpy(salt, ciphertext, len);
#ifdef SIMD_COEF_64
memset(&cur_salt, 0, sizeof(cur_salt));
/* scatter each salt byte into all MAX_KEYS_PER_CRYPT lanes */
while(((unsigned char*)salt)[salt_len])
{
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] =
((unsigned char*)salt)[salt_len];
++salt_len;
}
cur_salt.salt_len = salt_len;
/* append SHA-2 padding: 0x80 marker plus the message bit length */
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = 0x80;
((uint64_t*)cur_salt.salt[(salt_len+16) / PAD_SIZE])[15 * SIMD_COEF_64 + (i & (SIMD_COEF_64-1)) + (i/SIMD_COEF_64) * PAD_SIZE_W * SIMD_COEF_64] = (salt_len + PAD_SIZE) << 3;
}
return &cur_salt;
#else
return salt;
#endif
}
/* John the Ripper format descriptor for HMAC-SHA-512. */
struct fmt_main fmt_hmacSHA512 = {
{
/* format parameters: label/name, benchmark info, sizes and limits */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ NULL },
tests
}, {
/* method table: lifecycle, parsing, key handling, crypt, comparisons */
init_512,
done,
fmt_default_reset,
fmt_default_prepare,
valid_512,
split_512,
get_binary_512,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key_512,
get_key,
#ifdef SIMD_COEF_64
clear_keys,
#else
fmt_default_clear_keys,
#endif
crypt_all_512,
{
fmt_default_get_hash
},
cmp_all,
cmp_one_512,
cmp_exact
}
};
/* John the Ripper format descriptor for HMAC-SHA-384. */
struct fmt_main fmt_hmacSHA384 = {
{
/* format parameters: label/name, benchmark info, sizes and limits */
FORMAT_LABEL_384,
FORMAT_NAME,
ALGORITHM_NAME_384,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE_384,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ NULL },
tests_384
}, {
/* method table: lifecycle, parsing, key handling, crypt, comparisons */
init_384,
done,
fmt_default_reset,
fmt_default_prepare,
valid_384,
split_384,
get_binary_384,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key_384,
get_key,
#ifdef SIMD_COEF_64
clear_keys,
#else
fmt_default_clear_keys,
#endif
crypt_all_384,
{
fmt_default_get_hash
},
cmp_all,
cmp_one_384,
cmp_exact
}
};
#endif /* plugin stanza */
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Store *x - *y into *result, normalizing so tv_usec is non-negative.
 * NOTE: *y is modified (borrowed into) during normalization.
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Borrow whole seconds into y until y->tv_usec <= x->tv_usec. */
	if (x->tv_usec < y->tv_usec) {
		int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * carry;
		y->tv_sec += carry;
	}
	/* Carry excess microseconds (beyond one second) into y's seconds. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}
	/* After normalization the microsecond part is certainly positive. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;
	return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 16;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
cast_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: jiejun@openailab.com
*/
#include "cast_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
/* Cast op needs no per-node state; initialization is a no-op. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Nothing was allocated in init_node, so release is a no-op. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* No pre-run preparation is required for the reference cast op. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/*
 * Reference implementation of the cast operator: convert the input tensor's
 * elements to the output tensor's data type.
 * Supported conversions: FP32<->FP16 and FP32<->UINT8 (the integer paths
 * require exactly one quantization parameter on the input tensor).
 * Identical types degenerate to a memcpy.
 * Returns 0 on success, -1 on shape/layout mismatch or unsupported cast.
 */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    /* NOTE: removed an unused local that fetched ir_node->op.param_mem;
     * the cast parameters are fully described by the tensor data types. */
    int type_from = input_tensor->data_type;
    int type_to = output_tensor->data_type;
    int num_thread = exec_graph->num_thread;

    /* Input and output must describe the same geometry. */
    if (input_tensor->elem_num != output_tensor->elem_num || input_tensor->dim_num != output_tensor->dim_num)
    {
        return -1;
    }

    /* Same type: a plain copy suffices. */
    if (type_from == type_to)
    {
        memcpy(output_tensor->data, input_tensor->data, input_tensor->elem_num * input_tensor->elem_size);
        return 0;
    }

    for (uint8_t i = 0; i < input_tensor->dim_num; i++)
    {
        if (input_tensor->dims[i] != output_tensor->dims[i])
            return -1;
    }

    if (input_tensor->layout != output_tensor->layout)
    {
        return -1;
    }

    if (type_from == TENGINE_DT_FP32 && type_to == TENGINE_DT_FP16)
    {
        fp32_t* idata = (fp32_t*)input_tensor->data;
        fp16_t* odata = (fp16_t*)output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (uint32_t i = 0; i < input_tensor->elem_num; i++)
        {
            odata[i] = fp32_to_fp16(idata[i]);
        }
        return 0;
    }

    if (type_from == TENGINE_DT_FP16 && type_to == TENGINE_DT_FP32)
    {
        fp16_t* idata = (fp16_t*)input_tensor->data;
        fp32_t* odata = (fp32_t*)output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (uint32_t i = 0; i < input_tensor->elem_num; i++)
        {
            odata[i] = fp16_to_fp32(idata[i]);
        }
        return 0;
    }

    if (type_from == TENGINE_DT_FP32 && type_to == TENGINE_DT_UINT8)
    {
        float* idata = (float*)input_tensor->data;
        uint8_t* odata = (uint8_t*)output_tensor->data;
        if (1 == input_tensor->quant_param_num)
        {
            float scale = input_tensor->scale;
            int zero_point = input_tensor->zero_point;
#pragma omp parallel for num_threads(num_thread)
            for (uint32_t i = 0; i < input_tensor->elem_num; i++)
            {
                /* quantize with round-to-nearest, then clamp to [0, 255] */
                int val = (int)(roundf(idata[i] / scale)) + zero_point;
                if (val > 255)
                    val = 255;
                if (val < 0)
                    val = 0;
                odata[i] = (uint8_t)val;
            }
            return 0;
        }
    }

    if (type_from == TENGINE_DT_UINT8 && type_to == TENGINE_DT_FP32)
    {
        uint8_t* idata = (uint8_t*)input_tensor->data;
        float* odata = (float*)output_tensor->data;
        if (1 == input_tensor->quant_param_num)
        {
            float scale = input_tensor->scale;
            int zero_point = input_tensor->zero_point;
#pragma omp parallel for num_threads(num_thread)
            for (uint32_t i = 0; i < input_tensor->elem_num; i++)
            {
                odata[i] = (float)(idata[i] - zero_point) * scale;
            }
            return 0;
        }
    }

    /* Unsupported type pair (or missing quantization parameters). */
    return -1;
}
/* Cast preserves geometry: propagate the input tensor's shape to the output. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* node = exec_node->ir_node;
    struct graph* ir_graph = node->graph;
    struct tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]);
    struct tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);
    return set_ir_tensor_shape(output, input->dims, input->dim_num);
}
/* Reference kernel: always a candidate, never preferred over tuned kernels. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    (void)node_ops;
    (void)exec_graph;
    (void)exec_node;
    return OPS_SCORE_CANDO;
}
/* Dispatch table binding the reference cast implementation to the runtime. */
static struct node_ops ref_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL, /* no post-run cleanup needed */
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Register the reference cast kernel for OP_CAST with the runtime. */
int register_cast_ref_op()
{
    return register_builtin_node_ops(OP_CAST, &ref_node_ops);
}
/* Remove the reference cast kernel from the OP_CAST dispatch table. */
int unregister_cast_ref_op()
{
    return unregister_builtin_node_ops(OP_CAST, &ref_node_ops);
}
|
setup_template.h | /*
* setup_template.h
*
* Created on: Oct 3, 2019
* Author: carol
*/
#ifndef SETUP_TEMPLATE_H_
#define SETUP_TEMPLATE_H_
#include <random>
#include <omp.h>
#if __CUDA_ARCH__ >= 600
#include <cuda_fp16.h>
#endif
#include "include/cuda_utils.h"
#include "Parameters.h"
#include "include/generic_log.h"
#include "types.h"
#include "common.h"
#include "File.h"
#include "KernelCaller.h"
/*
 * Generate random distance (rv_cpu) and charge (qv_cpu) inputs and write
 * them to the given files. Skipped entirely when both files already exist.
 * Values are uniformly distributed in [0.1, 1.0].
 */
template<typename real_t>
void generateInput(dim_str dim_cpu, std::string& input_distances,
		std::vector<FOUR_VECTOR<real_t>>& rv_cpu, std::string& input_charges,
		std::vector<real_t>& qv_cpu) {
	if (File<real_t>::exists(input_distances) && File<real_t>::exists(input_charges)) {
		return;
	}
	std::cout << ("Generating input...\n");
	// Seed a Mersenne Twister from the system entropy source.
	std::random_device rd;
	std::mt19937 gen(rd());
	std::uniform_real_distribution<real_t> dis(0.1, 1.0);
	rv_cpu.resize(dim_cpu.space_elem);
	qv_cpu.resize(dim_cpu.space_elem);
	for (auto& rv_cpu_i : rv_cpu) {
		rv_cpu_i.v = real_t(dis(gen));
		rv_cpu_i.x = real_t(dis(gen));
		rv_cpu_i.y = real_t(dis(gen));
		rv_cpu_i.z = real_t(dis(gen));
	}
	// NOTE(fix): removed a leftover "TESTE" debug print of rv_cpu[35].x,
	// which read out of bounds whenever space_elem < 36.
	if (File<FOUR_VECTOR<real_t>>::write_to_file(input_distances, rv_cpu)) {
		error("error writing rv_cpu to file\n"); // fixed: message said "from file"
	}
	for (auto& qv_cpu_i : qv_cpu) {
		qv_cpu_i = real_t(dis(gen));
	}
	if (File<real_t>::write_to_file(input_charges, qv_cpu)) {
		error("error writing qv_cpu to file\n"); // fixed: message said "from file"
	}
}
/*
 * Load distances (rv_cpu) and charges (qv_cpu) from the given input files.
 * When fault_injection is non-zero, one charge is overwritten with a fixed
 * value (kept inside the 0.1 - 1.0 input range) to emulate a fault.
 */
template<typename real_t>
void readInput(dim_str dim_cpu, std::string& input_distances,
		std::vector<FOUR_VECTOR<real_t>>& rv_cpu, std::string& input_charges,
		std::vector<real_t>& qv_cpu, int fault_injection) {
	rv_cpu.resize(dim_cpu.space_elem);
	qv_cpu.resize(dim_cpu.space_elem);
	if (File<FOUR_VECTOR<real_t>>::read_from_file(input_distances, rv_cpu)) {
		error("error reading rv_cpu from file\n");
	}
	if (File<real_t>::read_from_file(input_charges, qv_cpu)) {
		error("error reading qv_cpu from file\n");
	}
	// Optional fault injection into the charge vector.
	if (fault_injection) {
		qv_cpu[2] = 0.732637263; // must be in range 0.1 - 1.0
		std::cout << "!!> Fault injection: qv_cpu[2]= " << qv_cpu[2] << std::endl;
	}
}
/* Load the gold (reference) force vector from file; abort on failure. */
template<typename real_t>
void readGold(dim_str dim_cpu, std::string& output_gold,
		std::vector<FOUR_VECTOR<real_t>>& fv_cpu_GOLD) {
	auto failed = File<FOUR_VECTOR<real_t>>::read_from_file(output_gold, fv_cpu_GOLD);
	if (failed) {
		error("error reading fv_cpu_GOLD from file\n");
	}
}
/*
 * Write the computed force vector to the gold file and report how many of
 * its components are exactly zero (a quick sanity statistic for the run).
 */
template<typename real_t>
void writeGold(dim_str dim_cpu, std::string& output_gold,
		std::vector<FOUR_VECTOR<real_t>>& fv_cpu) {
	int number_zeros = 0;
	for (auto& fv_cpu_i : fv_cpu) {
		if (fv_cpu_i.v == real_t(0.0))
			number_zeros++;
		if (fv_cpu_i.x == real_t(0.0))
			number_zeros++;
		if (fv_cpu_i.y == real_t(0.0))
			number_zeros++;
		if (fv_cpu_i.z == real_t(0.0))
			number_zeros++;
	}
	if (File<FOUR_VECTOR<real_t>>::write_to_file(output_gold, fv_cpu)) {
		error("error writing fv_cpu to file\n"); // fixed: message said "from file"
	}
	std::cout << "Number of zeros " << number_zeros << std::endl;
}
/*
 * Copy the host-side inputs into the per-stream device vectors.
 * The gold copy is uploaded only when on-GPU checking is enabled.
 */
template<typename real_t>
void gpu_memory_setup(const Parameters& parameters, VectorOfDeviceVector<box_str>& d_box_gpu,
		std::vector<box_str>& box_cpu, VectorOfDeviceVector<FOUR_VECTOR<real_t>>& d_rv_gpu,
		std::vector<FOUR_VECTOR<real_t>>& rv_cpu, VectorOfDeviceVector<real_t>& d_qv_gpu,
		std::vector<real_t>& qv_cpu, VectorOfDeviceVector<FOUR_VECTOR<real_t>>& d_fv_gpu,
		std::vector<std::vector<FOUR_VECTOR<real_t>>>& fv_cpu,
		rad::DeviceVector<FOUR_VECTOR<real_t>>& d_fv_gold_gpu, std::vector<FOUR_VECTOR<real_t>>& fv_cpu_GOLD) {
	for (int s = 0; s < parameters.nstreams; s++) {
		d_box_gpu[s] = box_cpu;
		d_rv_gpu[s] = rv_cpu;
		d_qv_gpu[s] = qv_cpu;
		d_fv_gpu[s] = fv_cpu[s];
	}
	if (parameters.gpu_check) {
		d_fv_gold_gpu = fv_cpu_GOLD;
	}
}
/* Release the per-stream device buffers by shrinking each to zero elements. */
template<typename real_t>
void gpu_memory_unset(const Parameters& parameters, VectorOfDeviceVector<box_str>& d_box_gpu,
		VectorOfDeviceVector<FOUR_VECTOR<real_t>>& d_rv_gpu, VectorOfDeviceVector<real_t>& d_qv_gpu,
		VectorOfDeviceVector<FOUR_VECTOR<real_t>>& d_fv_gpu,
		rad::DeviceVector<FOUR_VECTOR<real_t>>& d_fv_gold_gpu) {
	for (int s = 0; s < parameters.nstreams; s++) {
		d_rv_gpu[s].resize(0);
		d_qv_gpu[s].resize(0);
		d_fv_gpu[s].resize(0);
		d_box_gpu[s].resize(0);
	}
	// The gold buffer only exists when on-GPU checking was requested.
	if (parameters.gpu_check) {
		d_fv_gold_gpu.resize(0);
	}
}
/*
 * End-to-end driver: build the box neighborhood structure, load (or
 * generate) inputs, allocate per-stream device buffers, then run the
 * LavaMD kernel for parameters.iterations iterations, checking each
 * result against the gold output (or writing a new gold when generating).
 *
 * Fixes versus the previous revision:
 *  - `#pragma omp parallel for shared(reloadFlag, ...)` referenced a
 *    variable that does not exist (the actual name is reload_flag); this
 *    failed to compile with OpenMP enabled.
 *  - `#pragma omp atomic` was applied to `x = x || e`, which is not one of
 *    the statement forms the atomic construct accepts; a critical section
 *    is used instead.
 *  - the stream-sync loop used a signed index against streams.size().
 */
template<const uint32_t COUNT, typename half_t, typename real_t>
void setup_execution(Parameters& parameters, rad::Log& log,
		KernelCaller<COUNT, half_t, real_t>& kernel_caller) {
	//=====================================================================
	//	CPU/MCPU VARIABLES
	//=====================================================================
	double timestamp;
	par_str<real_t> par_cpu;
	dim_str dim_cpu;
	std::vector<box_str> box_cpu;
	std::vector<FOUR_VECTOR<real_t>> rv_cpu;
	std::vector<real_t> qv_cpu;
	std::vector<FOUR_VECTOR<real_t>> fv_cpu_GOLD;
	int nh;
	int number_nn = 0;
	//=====================================================================
	//	CHECK INPUT ARGUMENTS
	//=====================================================================
	dim_cpu.boxes1d_arg = parameters.boxes;
	//=====================================================================
	//	INPUTS
	//=====================================================================
	par_cpu.alpha = 0.5;
	//=====================================================================
	//	DIMENSIONS
	//=====================================================================
	// total number of boxes
	dim_cpu.number_boxes = dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg;
	// how many particles space has in each direction
	dim_cpu.space_elem = dim_cpu.number_boxes * NUMBER_PAR_PER_BOX;
	dim_cpu.space_mem = dim_cpu.space_elem * sizeof(FOUR_VECTOR<real_t> );
	dim_cpu.space_mem2 = dim_cpu.space_elem * sizeof(real_t);
	// box array
	dim_cpu.box_mem = dim_cpu.number_boxes * sizeof(box_str);
	//=====================================================================
	//	SYSTEM MEMORY
	//=====================================================================
	// prepare host memory to receive kernel output (forces), one vector per stream
	std::vector<std::vector<FOUR_VECTOR<real_t>>> fv_cpu(parameters.nstreams);
	kernel_caller.set_half_t_vectors(parameters.nstreams, dim_cpu.space_elem);
	for (auto& fv_cpu_i : fv_cpu) {
		fv_cpu_i.resize(dim_cpu.space_elem);
	}
	fv_cpu_GOLD.resize(dim_cpu.space_elem);
	//=====================================================================
	//	BOX
	//=====================================================================
	box_cpu.resize(dim_cpu.number_boxes);
	// initialize number of home boxes
	nh = 0;
	// enumerate home boxes in z/y/x order and record their <=26 neighbors
	for (int i = 0; i < dim_cpu.boxes1d_arg; i++) {
		for (int j = 0; j < dim_cpu.boxes1d_arg; j++) {
			for (int k = 0; k < dim_cpu.boxes1d_arg; k++) {
				// current home box
				box_cpu[nh].x = k;
				box_cpu[nh].y = j;
				box_cpu[nh].z = i;
				box_cpu[nh].number = nh;
				box_cpu[nh].offset = nh * NUMBER_PAR_PER_BOX;
				// initialize number of neighbor boxes
				box_cpu[nh].nn = 0;
				// neighbor boxes in z/y/x direction
				for (int l = -1; l < 2; l++) {
					for (int m = -1; m < 2; m++) {
						for (int n = -1; n < 2; n++) {
							// check if (this neighbor exists) and (it is not the same as home box)
							if ((((i + l) >= 0 && (j + m) >= 0 && (k + n) >= 0) == true
									&& ((i + l) < dim_cpu.boxes1d_arg
											&& (j + m) < dim_cpu.boxes1d_arg
											&& (k + n) < dim_cpu.boxes1d_arg) == true)
									&& (l == 0 && m == 0 && n == 0) == false) {
								// current neighbor box
								box_cpu[nh].nei[box_cpu[nh].nn].x = (k + n);
								box_cpu[nh].nei[box_cpu[nh].nn].y = (j + m);
								box_cpu[nh].nei[box_cpu[nh].nn].z = (i + l);
								box_cpu[nh].nei[box_cpu[nh].nn].number =
										(box_cpu[nh].nei[box_cpu[nh].nn].z * dim_cpu.boxes1d_arg
												* dim_cpu.boxes1d_arg)
												+ (box_cpu[nh].nei[box_cpu[nh].nn].y
														* dim_cpu.boxes1d_arg)
												+ box_cpu[nh].nei[box_cpu[nh].nn].x;
								box_cpu[nh].nei[box_cpu[nh].nn].offset =
										box_cpu[nh].nei[box_cpu[nh].nn].number * NUMBER_PAR_PER_BOX;
								// increment neighbor box
								box_cpu[nh].nn = box_cpu[nh].nn + 1;
								number_nn += box_cpu[nh].nn;
							}
						} // neighbor boxes in x direction
					} // neighbor boxes in y direction
				} // neighbor boxes in z direction
				// increment home box
				nh = nh + 1;
			} // home boxes in x direction
		} // home boxes in y direction
	} // home boxes in z direction
	//=====================================================================
	//	PARAMETERS, DISTANCE, CHARGE AND FORCE
	//=====================================================================
	if (parameters.generate) {
		generateInput(dim_cpu, parameters.input_distances, rv_cpu, parameters.input_charges,
				qv_cpu);
		readInput(dim_cpu, parameters.input_distances, rv_cpu, parameters.input_charges, qv_cpu,
				parameters.fault_injection);
	} else {
		readInput(dim_cpu, parameters.input_distances, rv_cpu, parameters.input_charges, qv_cpu,
				parameters.fault_injection);
		readGold(dim_cpu, parameters.output_gold, fv_cpu_GOLD);
	}
	//=====================================================================
	//	EXECUTION PARAMETERS
	//=====================================================================
	// one block per box, NUMBER_THREADS threads per block
	dim3 threads;
	dim3 blocks;
	blocks.x = dim_cpu.number_boxes;
	blocks.y = 1;
	threads.x = NUMBER_THREADS;
	threads.y = 1;
	//=====================================================================
	//	STREAMS
	//=====================================================================
	std::vector<CudaStream> streams(parameters.nstreams);
	//=====================================================================
	//	VECTORS
	//=====================================================================
	VectorOfDeviceVector<box_str> d_box_gpu(parameters.nstreams);
	VectorOfDeviceVector<FOUR_VECTOR<real_t>> d_rv_gpu(parameters.nstreams);
	VectorOfDeviceVector<real_t> d_qv_gpu(parameters.nstreams);
	VectorOfDeviceVector<FOUR_VECTOR<real_t>> d_fv_gpu(parameters.nstreams);
	rad::DeviceVector<FOUR_VECTOR<real_t>> d_fv_gold_gpu;
	//=====================================================================
	//	GPU MEMORY SETUP
	//=====================================================================
	gpu_memory_setup(parameters, d_box_gpu, box_cpu, d_rv_gpu, rv_cpu, d_qv_gpu, qv_cpu, d_fv_gpu,
			fv_cpu, d_fv_gold_gpu, fv_cpu_GOLD);
	//	LOOP START
	for (int loop = 0; loop < parameters.iterations; loop++) {
		if (parameters.verbose)
			std::cout << "======== Iteration #" << loop << "========\n";
		double globaltimer = rad::mysecond();
		timestamp = rad::mysecond();
		//=====================================================================
		//	GPU SETUP
		//=====================================================================
		// zero the host and device output buffers before each iteration
		for (uint32_t stream_idx = 0; stream_idx < parameters.nstreams; stream_idx++) {
			auto& it = fv_cpu[stream_idx];
			std::fill(it.begin(), it.end(), FOUR_VECTOR<real_t>());
			d_fv_gpu[stream_idx].clear();
		}
		kernel_caller.clear_half_t();
		if (parameters.verbose)
			std::cout << "Setup prepare time: " << rad::mysecond() - timestamp << "s\n";
		//=====================================================================
		//	KERNEL
		//=====================================================================
		double kernel_time = rad::mysecond();
		log.start_iteration();
		// launch kernel - all boxes, one launch per stream
		for (uint32_t stream_idx = 0; stream_idx < parameters.nstreams; stream_idx++) {
			kernel_caller.kernel_call(blocks, threads, streams[stream_idx], par_cpu, dim_cpu,
					d_box_gpu[stream_idx].data(), d_rv_gpu[stream_idx].data(),
					d_qv_gpu[stream_idx].data(), d_fv_gpu[stream_idx].data(), stream_idx);
		}
		for (size_t i = 0; i < streams.size(); i++) {
			streams[i].sync();
		}
		log.end_iteration();
		kernel_time = rad::mysecond() - kernel_time;

		auto cpy_time = rad::mysecond();
		for (uint32_t stream_idx = 0; stream_idx < parameters.nstreams; stream_idx++) {
			fv_cpu[stream_idx] = d_fv_gpu[stream_idx].to_vector();
		}
		cpy_time = rad::mysecond() - cpy_time;
		//=====================================================================
		//	COMPARE OUTPUTS / WRITE GOLD
		//=====================================================================
		if (parameters.generate) {
			writeGold(dim_cpu, parameters.output_gold, fv_cpu[0]);
		} else {
			timestamp = rad::mysecond();
			bool reload_flag = false;
			// fixed: the shared() clause used to name a nonexistent "reloadFlag"
#pragma omp parallel for shared(reload_flag, fv_cpu, fv_cpu_GOLD, log)
			for (uint32_t stream_idx = 0; stream_idx < parameters.nstreams; stream_idx++) {
				auto error = kernel_caller.check_output_errors(parameters.verbose, stream_idx,
						fv_cpu[stream_idx], fv_cpu_GOLD, log);
				// fixed: `x = x || e` is not a valid "omp atomic" form
#pragma omp critical
				reload_flag = reload_flag || error;
			}
			// on any mismatch, reload inputs/gold and rebuild device state
			if (reload_flag) {
				readInput(dim_cpu, parameters.input_distances, rv_cpu, parameters.input_charges,
						qv_cpu, parameters.fault_injection);
				readGold(dim_cpu, parameters.output_gold, fv_cpu_GOLD);
				gpu_memory_unset(parameters, d_box_gpu, d_rv_gpu, d_qv_gpu, d_fv_gpu,
						d_fv_gold_gpu);
				gpu_memory_setup(parameters, d_box_gpu, box_cpu, d_rv_gpu, rv_cpu, d_qv_gpu, qv_cpu,
						d_fv_gpu, fv_cpu, d_fv_gold_gpu, fv_cpu_GOLD);
			}
			if (parameters.verbose)
				std::cout << "Gold check time: " << rad::mysecond() - timestamp << std::endl;
		}
		//================= PERF
		// iterate for each neighbor of a box (number_nn)
		double flop = number_nn;
		// The last for iterate NUMBER_PAR_PER_BOX times
		flop *= NUMBER_PAR_PER_BOX;
		// the last for uses 46 operations plus 2 exp() functions
		flop *= 46;
		flop *= parameters.nstreams;
		double flops = flop / kernel_time;
		double outputpersec = dim_cpu.space_elem * 4 * parameters.nstreams / kernel_time;
		double iteration_time = rad::mysecond() - globaltimer;
		if (parameters.verbose) {
			std::cout << "BOXES: " << dim_cpu.boxes1d_arg;
			std::cout << " BLOCK: " << NUMBER_THREADS;
			std::cout << " OUTPUT/S:" << outputpersec;
			std::cout << " FLOPS:" << flops;
			std::cout << " (GFLOPS:" << flops / 1.0e9 << ") ";
			std::cout << "Kernel time:" << kernel_time << std::endl;
			std::cout << "Copy time:" << cpy_time << std::endl;
			std::cout << "Iteration time: " << iteration_time << "s ("
					<< (kernel_time / iteration_time) * 100.0 << "% of Device)" << std::endl;
			std::cout << "===================================" << std::endl;
		} else {
			std::cout << ".";
		}
	}
	if (parameters.generate) {
		kernel_caller.sync_half_t();
		std::cout << "Max element threshold " << kernel_caller.get_max_threshold(fv_cpu)
				<< std::endl;
	}
}
#endif /* SETUP_TEMPLATE_H_ */
|
sectionsModificado.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif
void funcA() {
    /* Report which OpenMP thread executed this section. */
    int tid = omp_get_thread_num();
    printf("En funcA: esta ejecuta el thread %d\n", tid);
}
void funcB() {
    /* Report which OpenMP thread executed this section. */
    int tid = omp_get_thread_num();
    printf("En funcB: esta sección la ejecuta el thread %d\n", tid);
}
/* Runs funcA and funcB as two OpenMP sections; when compiled without
 * OpenMP the pragmas are absent and the calls run sequentially. */
int main(int argc, char ** argv)
{
#ifdef _OPENMP
#pragma omp parallel sections
#endif
    {
#ifdef _OPENMP
#pragma omp section
#endif
        funcA();
#ifdef _OPENMP
#pragma omp section
#endif
        funcB();
    }
    return 0;
}
LAGraph_BF_full.c | //------------------------------------------------------------------------------
// LAGraph_BF_full.c: Bellman-Ford single-source shortest paths, returns tree
//------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//
// See additional acknowledgments in the LICENSE file,
// or contact permission@sei.cmu.edu for the full terms.
//------------------------------------------------------------------------------
// LAGraph_BF_full: Bellman-Ford single source shortest paths, returning both
// the path lengths and the shortest-path tree. contributed by Jinhao Chen and
// Tim Davis, Texas A&M.
// LAGraph_BF_full performs a Bellman-Ford to find out shortest path, parent
// nodes along the path and the hops (number of edges) in the path from given
// source vertex s in the range of [0, n) on graph given as matrix A with size
// n*n. The sparse matrix A has entry A(i, j) if there is an edge from vertex i
// to vertex j with weight w, then A(i, j) = w. Furthermore, LAGraph_BF_full
// requires A(i, i) = 0 for all 0 <= i < n.
// LAGraph_BF_full returns GrB_SUCCESS if successful, and GrB_NO_VALUE if it
// detects the existence of negative- weight cycle. The GrB_Vector d(k), pi(k)
// and h(k) (i.e., *pd_output, *ppi_output and *ph_output respectively) will
// be NULL when negative-weight cycle detected. Otherwise, the vector d has
// d(k) as the shortest distance from s to k. pi(k) = p+1, where p is the
// parent node of k-th node in the shortest path. In particular, pi(s) = 0.
// h(k) = hop(s, k), the number of edges from s to k in the shortest path.
//------------------------------------------------------------------------------
#define LAGraph_FREE_ALL \
{ \
GrB_free(&d); \
GrB_free(&dtmp); \
GrB_free(&Atmp); \
GrB_free(&BF_Tuple3); \
GrB_free(&BF_lMIN_Tuple3); \
GrB_free(&BF_PLUSrhs_Tuple3); \
GrB_free(&BF_EQ_Tuple3); \
GrB_free(&BF_lMIN_Tuple3_Monoid); \
GrB_free(&BF_lMIN_PLUSrhs_Tuple3); \
LAGraph_Free ((void**)&I); \
LAGraph_Free ((void**)&J); \
LAGraph_Free ((void**)&w); \
LAGraph_Free ((void**)&W); \
LAGraph_Free ((void**)&h); \
LAGraph_Free ((void**)&pi); \
}
#include <LAGraph.h>
#include <LAGraphX.h>
#include <LG_internal.h> // from src/utility
typedef void (*LAGraph_binary_function) (void *, const void *, const void *) ;
//------------------------------------------------------------------------------
// data type for each entry of the adjacent matrix A and "distance" vector d;
// <INFINITY,INFINITY,INFINITY> corresponds to nonexistence of a path, and
// the value <0, 0, NULL> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
typedef struct
{
double w; // w corresponds to a path weight.
GrB_Index h; // h corresponds to a path size or number of hops.
GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
// vertex indexed as 1, 2, 3, ... , V, and pi = 0 (as nil)
// for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF_Tuple3_struct;
//------------------------------------------------------------------------------
// 2 binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3
//------------------------------------------------------------------------------
// z = min(x, y): lexicographic minimum of two tuples, comparing weight w
// first, then hop count h, then penultimate vertex pi.  Ties go to y.
void BF_lMIN
(
    BF_Tuple3_struct *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    bool x_wins ;
    if (x->w != y->w)      { x_wins = (x->w  < y->w ) ; }
    else if (x->h != y->h) { x_wins = (x->h  < y->h ) ; }
    else                   { x_wins = (x->pi < y->pi) ; }
    if (x_wins)
    {
        // skip the copy when z aliases x (self-assignment)
        if (z != x) { *z = *x ; }
    }
    else
    {
        // on a complete tie the original operator also selects y
        *z = *y ;
    }
}
// z = x (+) y: extend a path by one edge -- weights and hop counts add;
// the penultimate vertex comes from y unless x is "no path" (pi==UINT64_MAX)
// or y is a self-loop marker (pi==0), in which case x's pi is kept.
void BF_PLUSrhs
(
    BF_Tuple3_struct *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    z->w = x->w + y->w ;
    z->h = x->h + y->h ;
    if (x->pi == UINT64_MAX || y->pi == 0)
    {
        z->pi = x->pi ;
    }
    else
    {
        z->pi = y->pi ;
    }
}
// z = (x == y): tuples are equal only when all three components match
void BF_EQ
(
    bool *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    bool equal = (x->w == y->w) ;
    equal = equal && (x->h == y->h) ;
    equal = equal && (x->pi == y->pi) ;
    (*z) = equal ;
}
// Given a n-by-n adjacency matrix A and a source vertex s.
// If there is no negative-weight cycle reachable from s, return the distances
// of shortest paths from s and parents along the paths as vector d. Otherwise,
// returns d=NULL if there is a negtive-weight cycle.
// pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the
// sum of edges length in the shortest path
// ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the
// parent of i-th vertex in the shortest path
// ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the
// number of edges from s to i in the shortest path
// A has zeros on diagonal and weights on corresponding entries of edges
// s is given index for source vertex
// LAGraph_BF_full: Bellman-Ford from source s on the n-by-n matrix A.
// On success fills *pd_output (distances), *ppi_output (parents, 1-based,
// 0 for the source) and *ph_output (hop counts); returns GrB_NO_VALUE and
// leaves the outputs NULL if a negative-weight cycle is reachable from s.
GrB_Info LAGraph_BF_full
(
    GrB_Vector *pd_output,      //the pointer to the vector of distance
    GrB_Vector *ppi_output,     //the pointer to the vector of parent
    GrB_Vector *ph_output,      //the pointer to the vector of hops
    const GrB_Matrix A,         //matrix for the graph
    const GrB_Index s           //given index of the source
)
{
    GrB_Info info;
    char *msg = NULL ;
    // tmp vector to store distance vector after n (i.e., V) loops
    GrB_Vector d = NULL, dtmp = NULL;
    GrB_Matrix Atmp = NULL;
    // user-defined type and operators over <weight, hops, parent> tuples
    GrB_Type BF_Tuple3;
    GrB_BinaryOp BF_lMIN_Tuple3;
    GrB_BinaryOp BF_PLUSrhs_Tuple3;
    GrB_BinaryOp BF_EQ_Tuple3;
    GrB_Monoid BF_lMIN_Tuple3_Monoid;
    GrB_Semiring BF_lMIN_PLUSrhs_Tuple3;
    GrB_Index nrows, ncols, n, nz;  // n = # of row/col, nz = # of nnz in graph
    GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A
    GrB_Index *h = NULL, *pi = NULL;
    double *w = NULL;
    BF_Tuple3_struct *W = NULL;

    LG_CHECK (A == NULL || pd_output == NULL ||
        ppi_output == NULL || ph_output == NULL, -1001, "inputs are NULL") ;

    // outputs stay NULL unless the whole computation succeeds
    *pd_output = NULL;
    *ppi_output = NULL;
    *ph_output = NULL;
    GrB_TRY (GrB_Matrix_nrows (&nrows, A)) ;
    GrB_TRY (GrB_Matrix_ncols (&ncols, A)) ;
    GrB_TRY (GrB_Matrix_nvals (&nz, A));
    LG_CHECK (nrows != ncols, -1002, "A must be square") ;
    n = nrows;
    // note: s is unsigned (GrB_Index), so the (s < 0) half of this test can
    // never fire; (s >= n) is the effective range check
    LG_CHECK (s >= n || s < 0, -1003, "invalid source node") ;

    //--------------------------------------------------------------------------
    // create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring
    //--------------------------------------------------------------------------
    // GrB_Type
    GrB_TRY (GrB_Type_new(&BF_Tuple3, sizeof(BF_Tuple3_struct)));

    // GrB_BinaryOp
    GrB_TRY (GrB_BinaryOp_new(&BF_EQ_Tuple3,
        (LAGraph_binary_function) (&BF_EQ), GrB_BOOL, BF_Tuple3, BF_Tuple3));
    GrB_TRY (GrB_BinaryOp_new(&BF_lMIN_Tuple3,
        (LAGraph_binary_function) (&BF_lMIN), BF_Tuple3, BF_Tuple3, BF_Tuple3));
    GrB_TRY (GrB_BinaryOp_new(&BF_PLUSrhs_Tuple3,
        (LAGraph_binary_function)(&BF_PLUSrhs),
        BF_Tuple3, BF_Tuple3, BF_Tuple3));

    // GrB_Monoid: identity is the "no path" tuple <inf, inf, inf>
    BF_Tuple3_struct BF_identity = (BF_Tuple3_struct) { .w = INFINITY,
        .h = UINT64_MAX, .pi = UINT64_MAX };
    GrB_TRY (GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3,
        &BF_identity));

    //GrB_Semiring: lexicographic-min "plus" semiring over path tuples
    GrB_TRY (GrB_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3,
        BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3));

    //--------------------------------------------------------------------------
    // allocate arrays used for tuplets
    //--------------------------------------------------------------------------
    I = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
    J = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
    w = LAGraph_Malloc (nz, sizeof(double)) ;
    W = LAGraph_Malloc (nz, sizeof(BF_Tuple3_struct)) ;
    LG_CHECK (I == NULL || J == NULL || w == NULL || W == NULL,
        -1004, "out of memory") ;

    //--------------------------------------------------------------------------
    // create matrix Atmp based on A, while its entries become BF_Tuple3 type
    //--------------------------------------------------------------------------
    GrB_TRY (GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A));
    int nthreads;
    LAGRAPH_OK (LAGraph_GetNumThreads (&nthreads, NULL)) ;
    printf ("nthreads %d\n", nthreads) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (GrB_Index k = 0; k < nz; k++)
    {
        if (w[k] == 0)          //diagonal entries
        {
            W[k] = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
        }
        else
        {
            // edge (i,j): one hop, parent recorded 1-based as i+1
            W[k] = (BF_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 };
        }
    }
    GrB_TRY (GrB_Matrix_new(&Atmp, BF_Tuple3, n, n));
    GrB_TRY (GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3));

    //--------------------------------------------------------------------------
    // create and initialize "distance" vector d
    //--------------------------------------------------------------------------
    GrB_TRY (GrB_Vector_new(&d, BF_Tuple3, n));
    // initial distance from s to itself
    BF_Tuple3_struct d0 = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
    GrB_TRY (GrB_Vector_setElement_UDT(d, &d0, s));

    //--------------------------------------------------------------------------
    // start the Bellman Ford process
    //--------------------------------------------------------------------------
    // copy d to dtmp in order to create a same size of vector
    GrB_TRY (GrB_Vector_dup(&dtmp, d));
    bool same= false;          // variable indicating if d == dtmp
    int64_t iter = 0;          // number of iterations
    // terminate when no new path is found or more than V-1 loops
    while (!same && iter < n - 1)
    {
        // execute semiring on d and A, and save the result to dtmp
        GrB_TRY (GrB_vxm(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3,
            d, Atmp, GrB_NULL));
        LAGRAPH_OK (LAGraph_Vector_IsEqual_op(&same, dtmp, d, BF_EQ_Tuple3, NULL));
        if (!same)
        {
            // swap d and dtmp so d always holds the newest distances
            GrB_Vector ttmp = dtmp;
            dtmp = d;
            d = ttmp;
        }
        iter ++;
    }

    // check for negative-weight cycle only when there was a new path in the
    // last loop, otherwise, there can't be a negative-weight cycle.
    if (!same)
    {
        // execute semiring again to check for negative-weight cycle
        GrB_TRY (GrB_vxm(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3,
            d, Atmp, GrB_NULL));
        // if d != dtmp, then there is a negative-weight cycle in the graph
        LAGRAPH_OK (LAGraph_Vector_IsEqual_op(&same, dtmp, d, BF_EQ_Tuple3, NULL));
        if (!same)
        {
            // printf("A negative-weight cycle found. \n");
            LAGraph_FREE_ALL;
            return (GrB_NO_VALUE) ;
        }
    }

    //--------------------------------------------------------------------------
    // extract tuple from "distance" vector d and create GrB_Vectors for output
    //--------------------------------------------------------------------------
    // NOTE(review): I and W were sized with nz = nvals(A); the extraction
    // below relies on nvals(d) <= nvals(A) -- confirm this holds for graphs
    // whose reachable vertex count exceeds the edge count
    GrB_TRY (GrB_Vector_extractTuples_UDT (I, (void *) W, &nz, d));
    h = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
    pi = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
    LG_CHECK (w == NULL || h == NULL || pi == NULL, -1004, "out of memory") ;
    // split the tuple entries into the three plain output arrays
    for (GrB_Index k = 0; k < nz; k++)
    {
        w [k] = W[k].w ;
        h [k] = W[k].h ;
        pi[k] = W[k].pi;
    }
    GrB_TRY (GrB_Vector_new(pd_output, GrB_FP64, n));
    GrB_TRY (GrB_Vector_new(ppi_output, GrB_UINT64, n));
    GrB_TRY (GrB_Vector_new(ph_output, GrB_UINT64, n));
    GrB_TRY (GrB_Vector_build_FP64 (*pd_output , I, w , nz,GrB_MIN_FP64 ));
    GrB_TRY (GrB_Vector_build_UINT64(*ppi_output, I, pi, nz,GrB_MIN_UINT64));
    GrB_TRY (GrB_Vector_build_UINT64(*ph_output , I, h , nz,GrB_MIN_UINT64));
    LAGraph_FREE_ALL;
    return (GrB_SUCCESS) ;
}
|
GB_unop__identity_uint64_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_fp32)
// op(A') function: GB (_unop_tran__identity_uint64_fp32)
// C type: uint64_t
// A type: float
// cast: uint64_t cij = GB_cast_to_uint64_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entrywise: Cx [p] = (uint64_t) Ax [p] for all
// anz entries, casting float -> uint64_t through double.  Parallelized with
// a static OpenMP schedule over nthreads threads.
GrB_Info GB (_unop_apply__identity_uint64_fp32)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // non-bitmap case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (A'): transpose with float -> uint64_t typecast.  The loop
// body is generated by the shared transpose template, which expands using
// the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__identity_uint64_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
parallelQuicksort.c | void parallelQuicksortRecursive(int* arr, int start, int end);
void swap(int* arr, int i, int j);
// Quicksort entry-point: sorts arr[0 .. len-1] in place; no-op for len <= 0.
void parallelQuicksort(int* arr, int len)
{
    if (len <= 0)
        return;
    parallelQuicksortRecursive(arr, 0, len - 1);
}
// Recursive quicksort implementation (Hoare-style partition).
//
// Sorts arr[start .. end] in place.  The two partitions are recursed on via
// an OpenMP "parallel sections" construct; note each recursion level opens a
// new parallel region (nested parallelism), which is correct but can
// oversubscribe threads -- consider a depth cutoff if this matters.
void parallelQuicksortRecursive(int* arr, int start, int end)
{
    // Guard first: the original read arr[(start+end)/2] even for empty or
    // single-element ranges, where the value was never used.
    if (start < end)
    {
        int i = start;
        int j = end;
        // Pivot: the element in the middle of the current range
        int pivot = arr[(i + j) / 2];

        // Partition: afterwards arr[start..j] <= pivot <= arr[i..end]
        while (i <= j)
        {
            // Increment i until the value it's pointed at is greater than the pivot
            while (arr[i] < pivot) i++;
            // Decrement j until the value it's pointed at is lesser than the pivot
            while (arr[j] > pivot) j--;
            if (i <= j)
            {
                swap(arr, i, j);
                i++;
                j--;
            }
        }

        #pragma omp parallel sections
        {
            #pragma omp section
            parallelQuicksortRecursive(arr, start, j); // left-most part of the array
            #pragma omp section
            parallelQuicksortRecursive(arr, i, end);   // right-most part of the array
        }
    }
}
|
volumeData.h | #ifndef _VOLUME3D_H
#define _VOLUME3D_H
#include <cstdio>
#include <cstring>
#include <cmath>
#include <string>
#include <iostream>
#include <cassert>
#include <sstream>
#include <omp.h>
#include <limits>
using namespace std;
// volumeData: owning wrapper around a raw 3-D scalar field stored as one
// contiguous buffer, laid out as index = x + dimX * (y + dimY * z).
// Ownership: the constructor takes the buffer and the destructor releases it
// with free(), so callers must allocate it with malloc/calloc/realloc.
template<typename T>
class volumeData {
public:
    // Takes ownership of 'data' (dimensions z*y*x elements) and immediately
    // computes the min/max of its contents.
    volumeData(T *data, int z, int y, int x);
    ~volumeData();

    int getDimX() { return _dimX; }
    int getDimY() { return _dimY; }
    int getDimZ() { return _dimZ; }
    T *getDataBuffer() { return _data; }

    //status
    void updateMaxMin();          // recompute cached _min/_max from _data
    T getMin() { return _min; }
    T getMax() { return _max; }

private:
    std::string _filename;        // not used by any member here
    T *_data;                     // owned voxel buffer
    int _dimX, _dimY, _dimZ;      // volume dimensions
    T _min, _max;                 // cached extrema of _data
    FILE *pFile{};                // not used by any member here
};

template<typename T>
volumeData<T>::volumeData(T *data, int z, int y, int x)
        :_dimX(x), _dimY(y), _dimZ(z) {
    _data = data;
    this->updateMaxMin();
}

template<typename T>
volumeData<T>::~volumeData() {
    // NOTE(review): released with free(); assumes a malloc-family
    // allocation -- confirm no caller passes a new[]'d pointer.
    if (_data)
        free(_data);
}

template<typename T>
void volumeData<T>::updateMaxMin() {
    // Fix 1: use lowest(), not min(), to seed the maximum.  For
    // floating-point T, numeric_limits<T>::min() is the smallest *positive*
    // value, so the old seed made getMax() wrong on all-negative data.
    // Fix 2: the old "if (...) then critical assignment" pattern raced,
    // because the comparison ran outside the critical section; per-thread
    // partial extrema merged once per thread are race-free.
    T gmin = std::numeric_limits<T>::max();
    T gmax = std::numeric_limits<T>::lowest();
#pragma omp parallel
    {
        T lmin = std::numeric_limits<T>::max();
        T lmax = std::numeric_limits<T>::lowest();
#pragma omp for collapse(2)
        for (int k = 0; k < _dimZ; k++) {
            for (int j = 0; j < _dimY; j++) {
                for (int i = 0; i < _dimX; i++) {
                    T value = _data[_dimX * _dimY * k + _dimX * j + i];
                    if (value < lmin) lmin = value;
                    if (value > lmax) lmax = value;
                }
            }
        }
#pragma omp critical
        {
            if (lmin < gmin) gmin = lmin;
            if (lmax > gmax) gmax = lmax;
        }
    }
    _min = gmin;
    _max = gmax;
}
#endif
|
GB_unop__trunc_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__trunc_fp64_fp64)
// op(A') function: GB (_unop_tran__trunc_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = trunc (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = trunc (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = trunc (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TRUNC || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply trunc() entrywise: Cx [p] = trunc (Ax [p]) for all anz entries
// (no typecast, both sides are double).  Parallelized with a static
// OpenMP schedule over nthreads threads.
GrB_Info GB (_unop_apply__trunc_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // non-bitmap case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = trunc (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = trunc (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = trunc (A'): transpose and apply trunc().  The loop body is generated
// by the shared transpose template, which expands using the GB_* macros
// defined above in this file.
GrB_Info GB (_unop_tran__trunc_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
}
#endif
|
pi.c | #include <stdio.h>
#include <omp.h>
static long num_steps = 100000;
double step, pi;
int main() {
long i;
double x, sum = 0.0;
step = 1.0 / (double) num_steps;
#pragma omp parallel for private(x) reduction(+:sum)
for (i = 0; i < num_steps; i++) {
x = (i + 0.5) * step;
sum = sum + 4.0 / (1.0 + x * x);
}
pi = step * sum;
printf("Pi = %f\n", pi);
return 0;
}
|
imd_distrib.c | /******************************************************************************
*
* IMD -- The ITAP Molecular Dynamics Program
*
* Copyright 1996-2012 Institute for Theoretical and Applied Physics,
* University of Stuttgart, D-70550 Stuttgart
*
******************************************************************************/
/******************************************************************************
*
* imd_distrib.c -- distributions of various quantities
*
******************************************************************************/
/******************************************************************************
* $Revision$
* $Date$
******************************************************************************/
#include "imd.h"
float *dat_1 = NULL, *dat_2 = NULL;
float *num_1 = NULL, *num_2 = NULL;
int dist_size;
/******************************************************************************
*
* write distributions
*
******************************************************************************/
/* Computes and writes every configured space-resolved distribution for MD
   step 'steps': allocates the global accumulation buffers (dat_1/num_1 plus
   MPI scratch aliases dat_2/num_2), bins the selected per-atom quantities,
   and writes one output file per quantity, indexed fzhlr = steps/dist_int. */
void write_distrib(int steps)
{
    char contents[255];
    int fzhlr, n, i, j, k;   /* note: i, j, k are unused in this function */

    is_big_endian = endian();

    /* backup if dist_ur is not set */
    if (0.0==dist_ur.x) {
        dist_ur.x = box_x.x;
        dist_ur.y = box_y.y;
#ifndef TWOD
        dist_ur.z = box_z.z;
#endif
    }

    /* total number of histogram bins */
    dist_size = dist_dim.x * dist_dim.y;
#ifndef TWOD
    dist_size *= dist_dim.z;
#endif

#ifdef BG
    n = 1; /* here we write presstens components in separate files */
#else
    /* n = number of float components stored per bin */
    n = dist_presstens_flag ? DIM*(DIM+1)/2 : 1;
#endif

#ifndef MPI
    dist_chunk_size = dist_size;
#endif

#if defined(BGL) && (defined(TIMING) || defined(DEBUG))
    if (myid==0)
        printf("%d MB free before distribution allocation\n", get_free_mem());
#endif

#if defined(BG) && defined(NBLIST)
    deallocate_nblist();
#endif
#if defined(BGL) && defined(NBLIST) && (defined(TIMING) || defined(DEBUG))
    if (myid==0)
        printf("%d MB free after nblist deallocation\n", get_free_mem());
#endif

    /* allocate distribution arrays */
#ifdef MPI2
    MPI_Alloc_mem( n * dist_size * sizeof(float), MPI_INFO_NULL, &dat_1 );
    MPI_Alloc_mem( dist_size * sizeof(float), MPI_INFO_NULL, &num_1 );
    MPI_Alloc_mem( n * dist_chunk_size * sizeof(float), MPI_INFO_NULL, &dat_2 );
    num_2 = dat_1;   /* num_2 deliberately shares storage with dat_1 */
#elif defined(MPI)
    dat_1 = (float *) malloc( n * dist_size * sizeof(float) );
    num_1 = (float *) malloc( dist_size * sizeof(float) );
    dat_2 = (float *) malloc( n * dist_chunk_size * sizeof(float) );
    num_2 = dat_1;   /* num_2 deliberately shares storage with dat_1 */
#else
    dat_1 = (float *) malloc( n * dist_size * sizeof(float) );
    num_1 = (float *) malloc( dist_size * sizeof(float) );
    dat_2 = dat_1;   /* serial case: scratch buffers alias the main ones */
    num_2 = num_1;
#endif
    if ((NULL==dat_1) || (NULL==num_1) || (NULL==dat_2) || (NULL==num_2))
        error("Cannot allocate distribution data.");

#if defined(BGL) && (defined(TIMING) || defined(DEBUG))
    if (myid==0)
        printf("%d MB free after distribution allocation\n", get_free_mem());
#endif

    /* output file index for this snapshot */
    fzhlr = steps / dist_int;

    /* make number density distribution */
    make_distrib_density();

    // MYMOD
    if (dist_mdtemp_flag) {
        make_distrib_temperature(fzhlr);
    }
    //ENDOF MYMOD

    if (dist_Ekin_flag) {
        make_write_distrib_select( 1, dist_Ekin_fun,
            dist_Ekin_flag, fzhlr, "Ekin", "Ekin");
    }
    if (dist_Epot_flag) {
        make_write_distrib_select( 1, dist_Epot_fun,
            dist_Epot_flag, fzhlr, "Epot", "Epot");
    }
#ifdef STRESS_TENS
    if (dist_press_flag) {
        make_write_distrib_select( 1, dist_press_fun,
            dist_press_flag, fzhlr, "press", "press");
    }
    if (dist_presstens_flag) {
#ifdef TWOD
        sprintf(contents, "P_xx P_yy P_xy");
#else
        sprintf(contents, "P_xx P_yy P_zz P_yz P_zx P_xy");
#endif
#ifdef BG
        /* to save memory, we write each componend in separate file */
        make_write_distrib_select( 1, dist_presstens_xx_fun,
            dist_presstens_flag, fzhlr, "presstens_xx", "presstens_xx" );
        make_write_distrib_select( 1, dist_presstens_yy_fun,
            dist_presstens_flag, fzhlr, "presstens_yy", "presstens_yy" );
#ifndef TWOD
        make_write_distrib_select( 1, dist_presstens_zz_fun,
            dist_presstens_flag, fzhlr, "presstens_zz", "presstens_zz" );
        make_write_distrib_select( 1, dist_presstens_yz_fun,
            dist_presstens_flag, fzhlr, "presstens_yz", "presstens_yz" );
        make_write_distrib_select( 1, dist_presstens_zx_fun,
            dist_presstens_flag, fzhlr, "presstens_zx", "presstens_zx" );
#endif
        make_write_distrib_select( 1, dist_presstens_xy_fun,
            dist_presstens_flag, fzhlr, "presstens_xy", "presstens_xy" );
#else
        make_write_distrib_select( DIM*(DIM+1)/2, dist_presstens_fun,
            dist_presstens_flag, fzhlr, "presstens", contents);
#endif /* BG */
    }
#endif /* STRESS_TENS */

#if defined(SHOCK) || defined(TTM)
    if (dist_vxavg_flag) {
        make_write_distrib_select( 1, dist_vxavg_fun,
            dist_vxavg_flag, fzhlr, "vxavg", "vxavg");
    }
#endif
#ifdef SHOCK
    if (dist_Ekin_long_flag) {
        make_write_distrib_select( 1, dist_Ekin_long_fun,
            dist_Ekin_long_flag, fzhlr, "Ekin_long", "Ekin_long");
    }
    if (dist_Ekin_trans_flag) {
        make_write_distrib_select( 1, dist_Ekin_trans_fun,
            dist_Ekin_trans_flag, fzhlr, "Ekin_trans", "Ekin_trans");
    }
    if (dist_Ekin_comp_flag) {
        make_write_distrib_select( 1, dist_Ekin_comp_fun,
            dist_Ekin_comp_flag, fzhlr, "Ekin_comp", "Ekin_comp");
    }
    if (dist_shock_shear_flag) {
        make_write_distrib_select( 1, dist_shock_shear_fun,
            dist_shock_shear_flag, fzhlr, "shock_shear", "shock_shear");
    }
    if (dist_shear_aniso_flag) {
        make_write_distrib_select( 1, dist_shear_aniso_fun,
            dist_shear_aniso_flag, fzhlr, "shear_aniso", "shear_aniso");
    }
    if (dist_pressoff_flag) {
        make_write_distrib_select( 1, dist_pressxy_fun,
            dist_pressoff_flag, fzhlr, "pressxy", "pressxy");
        make_write_distrib_select( 1, dist_pressyz_fun,
            dist_pressoff_flag, fzhlr, "pressyz", "pressyz");
        make_write_distrib_select( 1, dist_presszx_fun,
            dist_pressoff_flag, fzhlr, "presszx", "presszx");
    }
#endif /* SHOCK */

    /* write density distribution */
    if ((myid==0) && (dist_dens_flag)) {
        write_distrib_density(dist_dens_flag, fzhlr);
    }

    /* free distribution arrays (aliased buffers are not freed twice) */
#ifdef MPI2
    MPI_Free_mem(dat_1);
    MPI_Free_mem(num_1);
    MPI_Free_mem(dat_2);
#elif defined(MPI)
    free(dat_1);
    free(num_1);
    free(dat_2);
#else
    free(dat_1);
    free(num_1);
#endif
#if defined(BGL) && (defined(TIMING) || defined(DEBUG))
    if (myid==0)
        printf("%d MB free after distribution deallocation\n", get_free_mem());
#endif
}
/******************************************************************************
*
* selection function for kinetic energy
*
******************************************************************************/
/* accumulate the kinetic energy  p^2 / (2m)  of atom i in cell p into *dat */
void dist_Ekin_fun(float *dat, cell *p, int i)
{
    *dat += SPRODN(IMPULS,p,i,IMPULS,p,i) / (2 * MASSE(p,i));
}
/******************************************************************************
*
* selection function for potential energy
*
******************************************************************************/
/* accumulate the potential energy of atom i into *dat; with DISLOC and
   Epot_diff==1, the reference energy EPOT_REF is subtracted first */
void dist_Epot_fun(float *dat, cell *p, int i)
{
#ifdef DISLOC
    if (Epot_diff==1)
        *dat += POTENG(p,i) - EPOT_REF(p,i);
    else
#endif
        *dat += POTENG(p,i);
}
#ifdef STRESS_TENS
/******************************************************************************
*
* selection function for scalar pressure
*
******************************************************************************/
/* accumulate the scalar pressure of atom i: the arithmetic mean of the
   diagonal pressure tensor components (2 in 2D, 3 in 3D) */
void dist_press_fun(float *dat, cell *p, int i)
{
#ifdef TWOD
    *dat += (PRESSTENS(p,i,xx) + PRESSTENS(p,i,yy)) / 2.0;
#else
    *dat += (PRESSTENS(p,i,xx) + PRESSTENS(p,i,yy) + PRESSTENS(p,i,zz)) / 3.0;
#endif
}
/******************************************************************************
*
* selection functions for pressure tensor
*
******************************************************************************/
/* accumulate all pressure tensor components of atom i into dat[0..];
   component order matches the "contents" header written by write_distrib:
   xx yy xy (2D) or xx yy zz yz zx xy (3D) */
void dist_presstens_fun(float *dat, cell *p, int i)
{
    int k=0;
    dat[k++] += PRESSTENS(p,i,xx);
    dat[k++] += PRESSTENS(p,i,yy);
#ifndef TWOD
    dat[k++] += PRESSTENS(p,i,zz);
    dat[k++] += PRESSTENS(p,i,yz);
    dat[k++] += PRESSTENS(p,i,zx);
#endif
    dat[k++] += PRESSTENS(p,i,xy);
}
/* single-component pressure tensor selectors: each accumulates exactly one
   tensor component of atom i into *dat (used on BG builds, where each
   component is written to its own file to save memory) */
void dist_presstens_xx_fun(float *dat, cell *p, int i)
{
    *dat += PRESSTENS(p,i,xx);
}
void dist_presstens_yy_fun(float *dat, cell *p, int i)
{
    *dat += PRESSTENS(p,i,yy);
}
#ifndef TWOD
void dist_presstens_zz_fun(float *dat, cell *p, int i)
{
    *dat += PRESSTENS(p,i,zz);
}
void dist_presstens_yz_fun(float *dat, cell *p, int i)
{
    *dat += PRESSTENS(p,i,yz);
}
void dist_presstens_zx_fun(float *dat, cell *p, int i)
{
    *dat += PRESSTENS(p,i,zx);
}
#endif
void dist_presstens_xy_fun(float *dat, cell *p, int i)
{
    *dat += PRESSTENS(p,i,xy);
}
#endif /* STRESS_TENS */
#ifdef SHOCK
/******************************************************************************
*
* selection function for various shock quantities
*
******************************************************************************/
#ifdef STRESS_TENS
/* shear stress: half the deviation of the xx component from the mean of the
   two transverse diagonal components */
void dist_shock_shear_fun(float *dat, cell *p, int i)
{
    *dat += (PRESSTENS(p,i,xx)-(PRESSTENS(p,i,yy)+PRESSTENS(p,i,zz))/2.0)/2.0;
}
/* anisotropy between the two transverse diagonal stress components */
void dist_shear_aniso_fun(float *dat, cell *p, int i)
{
    *dat += PRESSTENS(p,i,yy)-PRESSTENS(p,i,zz);
}
/* off-diagonal stress component selectors */
void dist_pressxy_fun(float *dat, cell *p, int i)
{
    *dat += PRESSTENS(p,i,xy);
}
void dist_pressyz_fun(float *dat, cell *p, int i)
{
    *dat += PRESSTENS(p,i,yz);
}
void dist_presszx_fun(float *dat, cell *p, int i)
{
    *dat += PRESSTENS(p,i,zx);
}
#endif
/* transversal kinetic energy: (py^2 + pz^2) / (4 m)
   NOTE(review): denominator is 4m, i.e. the average of the two transverse
   components' kinetic energies rather than their sum -- confirm intended */
void dist_Ekin_trans_fun(float *dat, cell *p, int i)
{
    *dat += (SQR(IMPULS(p,i,Y)) + SQR(IMPULS(p,i,Z))) / (4 * MASSE(p,i));
}
/* difference kinetic energy: (py^2 - pz^2) / (2 m) */
void dist_Ekin_comp_fun(float *dat, cell *p, int i)
{
    *dat += (SQR(IMPULS(p,i,Y)) - SQR(IMPULS(p,i,Z))) / (2 * MASSE(p,i));
}
/* longitudinal kinetic energy, relative to the local average x-momentum */
void dist_Ekin_long_fun(float *dat, cell *p, int i)
{
    *dat += SQR(IMPULS(p,i,X) - PXAVG(p,i)) / (2*MASSE(p,i));
}
#endif //SHOCK
#if defined(SHOCK) || defined(TTM)
/* average sample velocity: accumulate the x-velocity  p_x / m  of atom i */
void dist_vxavg_fun(float *dat, cell *p, int i)
{
    *dat += IMPULS(p,i,X) / MASSE(p,i);
}
#endif /* SHOCK */
/******************************************************************************
*
* make density distribution
*
******************************************************************************/
/* Bins every atom into the spatial histogram: counts atoms per orthogonal
   box-shaped bin into num_2, then (under MPI) sums the per-CPU counts into
   num_1 on rank 0.  Atoms outside [dist_ll, dist_ur) are skipped. */
void make_distrib_density(void)
{
    cell *p;
    real scalex, scaley, scalez;
    int num, numx, numy, numz;
    int i, j, k, m, chunk_size;  /* j unused; m, chunk_size only for MPI+BG */

    /* the bins are orthogonal boxes in space */
    scalex = dist_dim.x / (dist_ur.x - dist_ll.x);
    scaley = dist_dim.y / (dist_ur.y - dist_ll.y);
#ifndef TWOD
    scalez = dist_dim.z / (dist_ur.z - dist_ll.z);
#endif

    /* clear distribution */
    for (i=0; i<dist_size; i++) num_2[i] = 0.0;

    /* loop over all atoms */
    for (k=0; k<NCELLS; ++k) {
        p = CELLPTR(k);
        for (i=0; i<p->n; ++i) {
            /* which bin? (linear index: x slowest, z fastest) */
            numx = scalex * (ORT(p,i,X) - dist_ll.x);
            if ((numx < 0) || (numx >= dist_dim.x)) continue;
            numy = scaley * (ORT(p,i,Y) - dist_ll.y);
            if ((numy < 0) || (numy >= dist_dim.y)) continue;
            num = numx * dist_dim.y + numy;
#ifndef TWOD
            numz = scalez * (ORT(p,i,Z) - dist_ll.z);
            if ((numz < 0) || (numz >= dist_dim.z)) continue;
            num = num * dist_dim.z + numz;
#endif
            num_2[num] += 1.0;
        }
    }

    /* add up results from different CPUs */
#ifdef MPI
#ifdef BG
    /* doing it in several chunks saves memory */
    for (m = 0; m < dist_size; m += dist_chunk_size) {
        chunk_size = MIN( dist_size - m, dist_chunk_size );
        MPI_Reduce(num_2+m, num_1+m, chunk_size, MPI_FLOAT, MPI_SUM, 0, cpugrid);
    }
#else
    MPI_Reduce( num_2, num_1, dist_size, MPI_FLOAT, MPI_SUM, 0, cpugrid);
#endif
#endif
}
/******************************************************************************
*
* write density distribution
*
******************************************************************************/
/* Write the density distribution collected in num_1 to
   "<outfilename>.<fzhlr>.dens" (binary or ASCII depending on 'mode') and
   append this step's min/max values to "<outfilename>.minmax.dens".
   mode  -- DIST_FORMAT_BINARY / DIST_FORMAT_ASCII / DIST_FORMAT_ASCII_COORD
   fzhlr -- file number used in the output file name */
void write_distrib_density(int mode, int fzhlr)
{
  FILE *outfile;
  char fname[255];
  float fac, max=0.0, min=1e10;
  int i, count, r, s, t;   /* removed unused local 'j' */
  real vol;

  /* open distribution file, write header */
  sprintf(fname, "%s.%u.%s", outfilename, fzhlr, "dens");
  outfile = fopen(fname, "w");
  if (NULL == outfile) error("Cannot open distribution file.");
  write_distrib_header(outfile, mode, 1, "dens");

  /* compute density, minima and maxima; fac = bins per volume, so
     count * fac is a number density */
  vol = (dist_ur.x - dist_ll.x) * (dist_ur.y - dist_ll.y);
#ifndef TWOD
  vol *= (dist_ur.z - dist_ll.z);
#endif
  fac = dist_size / vol;
  for (i=0; i<dist_size; i++) {
    dat_1[i] = num_1[i] * fac;
    max = MAX( max, dat_1[i] );
    min = MIN( min, dat_1[i] );
  }

  /* write distribution */
  if (mode==DIST_FORMAT_BINARY) {
    count = fwrite(dat_1, sizeof(float), dist_size, outfile);
    if (count != dist_size) warning("distribution write incomplete!");
  }
  else if ((mode==DIST_FORMAT_ASCII) || (mode==DIST_FORMAT_ASCII_COORD)) {
    i=0;
    for (r=0; r<dist_dim.x; r++)
      for (s=0; s<dist_dim.y; s++)
#ifndef TWOD
        for (t=0; t<dist_dim.z; t++)
#endif
        {
          /* optionally prefix each value with its grid coordinates */
          if (mode==DIST_FORMAT_ASCII_COORD) {
#ifdef TWOD
            fprintf(outfile, "%d %d ", r, s);
#else
            fprintf(outfile, "%d %d %d ", r, s, t);
#endif
          }
          fprintf(outfile,"%e\n", dat_1[i++]);
        }
  }
  else error("unknown distribution output format");
  fclose(outfile);

  /* append min/max of this step */
  sprintf(fname, "%s.minmax.%s", outfilename, "dens");
  outfile = fopen(fname, "a");
  if (NULL == outfile) error("Cannot open minmax file.");
  fprintf(outfile, "%d %e %e\n", fzhlr, min, max);
  fclose(outfile);
}
//MYMOD
/******************************************************************************
*
* make temperature distribution
*
******************************************************************************/
void make_distrib_temperature(int fzhlr) //, char *suffix, char *cont)
{
cell *p;
real scalex, scaley, scalez;
int num, numx, numy, numz;
int i,k;
float *vcomx,*vcomy,*vcomz;
float *vcomx_r,*vcomy_r,*vcomz_r; //reduced
float *totmass, *totmass_r;
float *mdtemp,*mdtemp_r;
vcomx = (float *) malloc( dist_dim.x * dist_dim.y * dist_dim.z* sizeof(float) );
vcomy = (float *) malloc( dist_dim.x * dist_dim.y * dist_dim.z* sizeof(float) );
vcomz = (float *) malloc( dist_dim.x * dist_dim.y * dist_dim.z* sizeof(float) );
vcomx_r = (float *) malloc( dist_dim.x * dist_dim.y * dist_dim.z* sizeof(float) );
vcomy_r = (float *) malloc( dist_dim.x * dist_dim.y * dist_dim.z* sizeof(float) );
vcomz_r = (float *) malloc( dist_dim.x * dist_dim.y * dist_dim.z* sizeof(float) );
totmass = (float *) malloc( dist_dim.x * dist_dim.y * dist_dim.z* sizeof(float) );
totmass_r = (float *) malloc( dist_dim.x * dist_dim.y * dist_dim.z* sizeof(float) );
mdtemp = (float *) malloc( dist_dim.x * dist_dim.y * dist_dim.z* sizeof(float) );
mdtemp_r = (float *) malloc( dist_dim.x * dist_dim.y * dist_dim.z* sizeof(float) );
/* the bins are orthogonal boxes in space */
scalex = dist_dim.x / (dist_ur.x - dist_ll.x);
scaley = dist_dim.y / (dist_ur.y - dist_ll.y);
scalez = dist_dim.z / (dist_ur.z - dist_ll.z);
/* clear distribution */
for (i=0; i<dist_size; i++){
vcomx[i]=vcomy[i]=vcomz[i]=0.0;
totmass[i]=0.0;
mdtemp[i]=0.0;
num_2[i] = 0.0;
}
//1st loop over atoms to get vcom
for (k=0; k<NCELLS; ++k) {
p = CELLPTR(k);
for (i=0; i<p->n; ++i) {
/* which bin? */
numx = scalex * (ORT(p,i,X) - dist_ll.x);
if ((numx < 0) || (numx >= dist_dim.x)) continue;
numy = scaley * (ORT(p,i,Y) - dist_ll.y);
if ((numy < 0) || (numy >= dist_dim.y)) continue;
num = numx * dist_dim.y + numy;
numz = scalez * (ORT(p,i,Z) - dist_ll.z);
if ((numz < 0) || (numz >= dist_dim.z)) continue;
num = num * dist_dim.z + numz;
vcomx[num] += IMPULS(p,i,X) ;
vcomy[num] += IMPULS(p,i,Y) ;
vcomz[num] += IMPULS(p,i,Z) ;
totmass[num] += MASSE(p,i);
num_2[num] += 1.0; //Atomzahl
}
}
/* add up results form different CPUs */
#ifdef MPI
MPI_Allreduce( vcomx, vcomx_r, dist_size, MPI_FLOAT, MPI_SUM, cpugrid);
MPI_Allreduce( vcomy, vcomy_r, dist_size, MPI_FLOAT, MPI_SUM, cpugrid);
MPI_Allreduce( vcomz, vcomz_r, dist_size, MPI_FLOAT, MPI_SUM, cpugrid);
MPI_Allreduce( totmass, totmass_r, dist_size, MPI_FLOAT, MPI_SUM, cpugrid);
MPI_Reduce( num_2, num_1, dist_size, MPI_FLOAT, MPI_SUM, 0, cpugrid); //Atomzahl
#endif
for(i=0;i<dist_size;i++)
{
if(totmass_r[i]>0 )
{
vcomx_r[i] /= totmass_r[i];
vcomy_r[i] /= totmass_r[i];
vcomz_r[i] /= totmass_r[i];
//printf("myid:%d, i:%d, vcomx:%f\n",myid,i,vcomx_r[i]);
}
}
//2nd loop
for (k=0; k<NCELLS; ++k)
{
p = CELLPTR(k);
for (i=0; i<p->n; ++i)
{
/* which bin? */
numx = scalex * (ORT(p,i,X) - dist_ll.x);
if ((numx < 0) || (numx >= dist_dim.x)) continue;
numy = scaley * (ORT(p,i,Y) - dist_ll.y);
if ((numy < 0) || (numy >= dist_dim.y)) continue;
num = numx * dist_dim.y + numy;
numz = scalez * (ORT(p,i,Z) - dist_ll.z);
if ((numz < 0) || (numz >= dist_dim.z)) continue;
num = num * dist_dim.z + numz;
mdtemp[num] += MASSE(p,i) * SQR(IMPULS(p,i,X)/MASSE(p,i) - vcomx_r[num]);
mdtemp[num] += MASSE(p,i) * SQR(IMPULS(p,i,Y)/MASSE(p,i) - vcomy_r[num]);
mdtemp[num] += MASSE(p,i) * SQR(IMPULS(p,i,Z)/MASSE(p,i) - vcomz_r[num]);
//printf("myid:%d, md:%f\n",myid,mdtemp[num]);
}
}
#ifdef MPI
MPI_Reduce( mdtemp, mdtemp_r, dist_size, MPI_FLOAT, MPI_SUM, 0, cpugrid);
#endif
for(i=0;i<dist_size;i++)
{
if(totmass_r[i]>0 )
{
mdtemp_r[i] /= 3.0 * ((float) num_1[i]);
}
}
///////////// WRITE distributions /////////////////
if(myid==0)
{
FILE *outfile;
char fname[255];
int r, s, t;
/* open distribution file, write header */
sprintf(fname, "%s.%u.%s", outfilename, fzhlr, "mdtemp");
outfile = fopen(fname, "w");
if (NULL == outfile) error("Cannot open md-temp distribution file.");
/* write distribution */
i=0;
for (r=0; r<dist_dim.x; r++)
{
for (s=0; s<dist_dim.y; s++)
{
for (t=0; t<dist_dim.z; t++)
{
fprintf(outfile,"%e\n", mdtemp_r[i++]);
}
}
}
fclose(outfile);
}
/* write minmax */
// WOZU?
// sprintf(fname, "%s.minmax.%s", outfilename, "dens");
// outfile = fopen(fname, "a");
// if (NULL == outfile) error("Cannot open minmax file.");
// fprintf(outfile, "%d %e %e\n", fzhlr, min, max);
// fclose(outfile);
//FREE MEM
free(mdtemp);
free(mdtemp_r);
free(vcomx);
free(vcomy);
free(vcomz);
free(vcomx_r);
free(vcomy_r);
free(vcomz_r);
free(totmass);
free(totmass_r);
}
//ENDOF MYMOD
/******************************************************************************
*
* make and write distribution of selected variables
*
******************************************************************************/
/* Accumulate a per-atom quantity on the spatial grid ('fun' adds n values
   per bin into dat_1), reduce over MPI ranks chunk by chunk, normalize each
   bin by its atom count (num_1, filled by make_distrib_density), and write
   the result to "<outfilename>.<fzhlr>.<suffix>".  Per-column min/max are
   appended to "<outfilename>.minmax.<suffix>".
   n      -- number of data values per bin
   fun    -- accumulator, called as fun(dat_1 + n*bin, cell, atom_index)
   mode   -- DIST_FORMAT_BINARY / DIST_FORMAT_ASCII / DIST_FORMAT_ASCII_COORD
   fzhlr  -- file number; suffix / cont -- file suffix and column names */
void make_write_distrib_select(int n, void (*fun)(float*, cell*, int),
int mode, int fzhlr, char *suffix, char *cont)
{
  cell *p;
  real scalex, scaley, scalez;
  int num, numx, numy, numz;
  int i, j, k, count, r, s, t, m, chunk_size;
  float max[6], min[6];   /* per-column extrema; assumes n <= 6 */
  FILE *outfile=NULL;
  char fname[255];

  /* the bins are orthogonal boxes in space */
  scalex = dist_dim.x / (dist_ur.x - dist_ll.x);
  scaley = dist_dim.y / (dist_ur.y - dist_ll.y);
#ifndef TWOD
  scalez = dist_dim.z / (dist_ur.z - dist_ll.z);
#endif

  /* clear distribution */
  for (i=0; i<n*dist_size; i++) dat_1[i] = 0.0;

  /* loop over all atoms */
  for (k=0; k<NCELLS; ++k) {
    p = CELLPTR(k);
    for (i=0; i<p->n; ++i) {
      /* which bin?  atoms outside the grid are skipped */
      numx = scalex * (ORT(p,i,X) - dist_ll.x);
      if ((numx < 0) || (numx >= dist_dim.x)) continue;
      numy = scaley * (ORT(p,i,Y) - dist_ll.y);
      if ((numy < 0) || (numy >= dist_dim.y)) continue;
      num = numx * dist_dim.y + numy;
#ifndef TWOD
      numz = scalez * (ORT(p,i,Z) - dist_ll.z);
      if ((numz < 0) || (numz >= dist_dim.z)) continue;
      num = num * dist_dim.z + numz;
#endif
      (*fun)(dat_1 + n * num, p, i);
    }
  }

  /* open distribution file, write header (rank 0 only) */
  if (myid==0) {
    sprintf(fname, "%s.%u.%s", outfilename, fzhlr, suffix);
    outfile = fopen(fname, "w");
    if (NULL == outfile) error("Cannot open distribution file.");
    write_distrib_header(outfile, mode, n, cont);
    for (k=0; k<n; k++) {
      min[k] = 1e+10;
      max[k] = -1e+10;
    }
  }

  /* collect and write data in handy chunks; r,s,t track the grid
     coordinates of the next bin to be written */
  r = s = t = 0;
  for (m = 0; m < dist_size; m += dist_chunk_size) {
    chunk_size = MIN( dist_size - m, dist_chunk_size );
    /* add up results from different CPUs */
#ifdef MPI
    MPI_Reduce(dat_1+n*m, dat_2, n*chunk_size, MPI_FLOAT, MPI_SUM, 0, cpugrid);
#endif
    if (myid==0) {
      /* normalize distribution by atom count, track minima and maxima */
      for (i=0; i<chunk_size; i++) {
        if (num_1[m+i] > 0.0) {
          for (k=0; k<n; k++) {
            dat_2[n*i+k] /= num_1[m+i];
            min[k] = MIN( min[k], dat_2[n*i+k] );
            max[k] = MAX( max[k], dat_2[n*i+k] );
          }
        }
      }
      /* write distribution */
      if (mode==DIST_FORMAT_BINARY) {
        count = fwrite(dat_2, sizeof(float), n*chunk_size, outfile);
        if (count != n*chunk_size) warning("distribution write incomplete!");
      }
      else if ((mode==DIST_FORMAT_ASCII) || (mode==DIST_FORMAT_ASCII_COORD)) {
        for (i=0; i<chunk_size; i++) {
          if (mode==DIST_FORMAT_ASCII_COORD) {
#ifdef TWOD
            fprintf(outfile, "%d %d", r, s++);
            if (s==dist_dim.y) { s=0; r++; }
#else
            fprintf(outfile, "%d %d %d", r, s, t++);
            if (t==dist_dim.z) { t=0; s++; }
            if (s==dist_dim.y) { s=0; r++; }
#endif
          }
#ifdef BG
          fprintf(outfile," %e\n", dat_2[i]);
#else
          for (k=0; k<n; k++) fprintf(outfile," %e", dat_2[n*i+k]);
          fprintf(outfile, "\n");
#endif
        }
      }
      else error("unknown distribution output format");
    }
  }
  /* close distribution file */
  if (myid==0) fclose(outfile);

  /* append per-column min/max for this step */
  if (myid==0) {
    sprintf(fname, "%s.minmax.%s", outfilename, suffix);
    outfile = fopen(fname, "a");
    if (NULL == outfile) error("Cannot open minmax file.");
    fprintf( outfile, "%d ", fzhlr );
    for (i=0; i<n; i++) fprintf(outfile, " %e %e", min[i], max[i]);
    fprintf(outfile, "\n");
    fclose(outfile);
  }
}
/******************************************************************************
*
* write header of distribution files
*
******************************************************************************/
/* Write the common header of a distribution file:
   #F format line, #C contents, #D grid dimensions, #S bin sizes, #E end.
   out  -- already opened output file
   mode -- output format (binary / ascii / ascii+coordinates)
   n    -- number of data columns per bin
   cont -- description of the data columns for the #C line */
void write_distrib_header(FILE *out, int mode, int n, char *cont)
{
  char c;
  int n_coord;
  time_t now;
  vektor s;

  /* format line -- format dim n_coord n_data */
  if (mode==DIST_FORMAT_BINARY)
    c = is_big_endian ? 'B' : 'L';
  else
    c = 'A';
  n_coord = (mode==DIST_FORMAT_ASCII_COORD) ? DIM : 0;
  fprintf(out, "#F %c %d %d %d\n", c, DIM, n_coord, n);

  /* contents line */
  if (mode==DIST_FORMAT_ASCII_COORD)
/* BUG FIX: this was "#ifdef TWO", so 2D builds always got the 3D line */
#ifdef TWOD
    fprintf(out, "#C x y %s\n", cont);
#else
    fprintf(out, "#C x y z %s\n", cont);
#endif
  else
    fprintf(out, "#C %s\n", cont);

  /* dimension line */
#ifdef TWOD
  fprintf(out, "#D %d %d\n", dist_dim.x, dist_dim.y);
#else
  fprintf(out, "#D %d %d %d\n", dist_dim.x, dist_dim.y, dist_dim.z);
#endif

  /* bin size line */
  s.x = (dist_ur.x - dist_ll.x) / dist_dim.x;
  s.y = (dist_ur.y - dist_ll.y) / dist_dim.y;
#ifdef TWOD
  fprintf(out, "#S %e %e\n", s.x, s.y);
#else
  s.z = (dist_ur.z - dist_ll.z) / dist_dim.z;
  fprintf(out, "#S %e %e %e\n", s.x, s.y, s.z);
#endif

  /* endheader line */
  time(&now);
  fprintf(out, "## Generated on %s", ctime(&now) );
  fprintf(out, "## by %s (version of %s)\n", progname, DATE);
  fprintf(out, "#E\n");
}
#ifdef ATDIST
/******************************************************************************
*
* initialize atoms distribution array
*
******************************************************************************/
/* Allocate and zero the atoms-distribution histogram: one channel of
   atdist_size bins per atom type.  Requires atdist_ll/atdist_ur to be set;
   computes the bin scaling factors as a side effect. */
void init_atdist()
{
  int size, i;

  /* compute array size: bins times number of atom types */
  atdist_size = atdist_dim.x * atdist_dim.y;
#ifndef TWOD
  atdist_size *= atdist_dim.z;
#endif
  size = atdist_size * ntypes;

  /* atdist_ll and atdist_ur must be set (ur.x == 0 means "unset") */
  if (0.0==atdist_ur.x) {
    error("atdist_ll and atdist_ur must be set");
  }

  /* the bins are orthogonal boxes in space */
  atdist_scale.x = atdist_dim.x / (atdist_ur.x - atdist_ll.x);
  atdist_scale.y = atdist_dim.y / (atdist_ur.y - atdist_ll.y);
#ifndef TWOD
  atdist_scale.z = atdist_dim.z / (atdist_ur.z - atdist_ll.z);
#endif

  /* allocate distribution array (only on first call) */
  if (NULL==atdist) {
    atdist = (float *) malloc( size * sizeof(float) );
    if (NULL==atdist) error("Cannot allocate atoms distribution array.");
  }

  /* initialize distribution array */
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (i=0; i<size; i++) atdist[i]=0.0;
}
/******************************************************************************
*
* update atoms distribution array
*
******************************************************************************/
/* Accumulate the current atom positions into the atdist histogram, one
   channel per atom type.  Atoms are periodically replicated over the image
   range [atdist_per_ll, atdist_per_ur], rotated by atdist_phi around the
   z-axis, clipped to [atdist_ll, atdist_ur], and binned (indices that fall
   just outside are clamped to the edge bins). */
void update_atdist()
{
  int num, numx, numy, numz;
  cell *p;
  int i, k, ix, iy, iz;
  real x, y, z, t, co, si;
#ifdef CM_HACK
  /* drift estimate accumulated across calls (static), subtracted
     periodically to keep the sample's center of mass at rest */
  static vektor tot_velocity = {0.0,0.0,0.0};
  static int step_count=0, max_count=100;
  int count=0;
#endif

  co = cos(atdist_phi);
  si = sin(atdist_phi);

#ifdef CM_HACK
  /* correct center of mass velocity */
  for (k=0; k<NCELLS; ++k) {
    int i;
    cell *p;
    p = CELLPTR(k);
    for (i=0; i<p->n; ++i) {
      /* only atoms of type > 0 contribute to the drift estimate */
      if (SORTE(p,i)>0) {
        tot_velocity.x += IMPULS(p,i,X) / MASSE(p,i);
        tot_velocity.y += IMPULS(p,i,Y) / MASSE(p,i);
        tot_velocity.z += IMPULS(p,i,Z) / MASSE(p,i);
        count++;
      }
    }
  }
  step_count++;
  /* NOTE(review): this condition is true on 99 of every 100 steps; if the
     correction is meant to fire once per max_count steps it should read
     (step_count % max_count == 0).  The else branch also increments
     step_count a second time, and tot_velocity (accumulated over several
     calls) is divided by this call's count only.  Confirm intent before
     changing — it is labelled a HACK. */
  if (step_count % max_count) {
    for (k=0; k<NCELLS; ++k) {
      int i;
      cell *p;
      p = CELLPTR(k);
      for (i=0; i<p->n; ++i) {
        IMPULS(p,i,X) -= tot_velocity.x * MASSE(p,i) / count;
        IMPULS(p,i,Y) -= tot_velocity.y * MASSE(p,i) / count;
        IMPULS(p,i,Z) -= tot_velocity.z * MASSE(p,i) / count;
      }
    }
    tot_velocity.x = 0.0;
    tot_velocity.y = 0.0;
    tot_velocity.z = 0.0;
    count=0;
  } else {
    step_count++;
  }
#endif /* CM_HACK */

  /* loop over all atoms */
  for (k=0; k<NCELLS; ++k) {
    p = CELLPTR(k);
    for (i=0; i<p->n; ++i)
      /* periodic continuation: replicate each atom over the image range */
      for (ix=atdist_per_ll.x; ix<=atdist_per_ur.x; ix++)
#ifndef TWOD
        for (iz=atdist_per_ll.z; iz<=atdist_per_ur.z; iz++)
#endif
          for (iy=atdist_per_ll.y; iy<=atdist_per_ur.y; iy++) {
#ifdef TWOD
            x = ORT(p,i,X) + ix * box_x.x + iy * box_y.x;
            y = ORT(p,i,Y) + ix * box_x.y + iy * box_y.y;
#else
            x = ORT(p,i,X) + ix * box_x.x + iy * box_y.x + iz * box_z.x;
            y = ORT(p,i,Y) + ix * box_x.y + iy * box_y.y + iz * box_z.y;
            z = ORT(p,i,Z) + ix * box_x.z + iy * box_y.z + iz * box_z.z;
#endif
            /* rotate (x,y) by atdist_phi */
            t =  co * x + si * y;
            y = -si * x + co * y;
            x = t;
            /* continue if atom is not inside selected box */
            if ((x < atdist_ll.x) || (x > atdist_ur.x) ||
#ifndef TWOD
                (z < atdist_ll.z) || (z > atdist_ur.z) ||
#endif
                (y < atdist_ll.y) || (y > atdist_ur.y)) continue;
            /* which bin?  clamp indices to the valid range */
            numx = atdist_scale.x * (x - atdist_ll.x);
            if (numx < 0) numx = 0;
            if (numx >= atdist_dim.x) numx = atdist_dim.x-1;
            numy = atdist_scale.y * (y - atdist_ll.y);
            if (numy < 0) numy = 0;
            if (numy >= atdist_dim.y) numy = atdist_dim.y-1;
            num = numx * atdist_dim.y + numy;
#ifndef TWOD
            numz = atdist_scale.z * (z - atdist_ll.z);
            if (numz < 0) numz = 0;
            if (numz >= atdist_dim.z) numz = atdist_dim.z-1;
            num = num * atdist_dim.z + numz;
#endif
            /* select the channel of this atom's type */
            num = SORTE(p,i) * atdist_size + num;
            atdist[num] += 1.0;
          }
  }
}
/******************************************************************************
*
* write atoms distribution array
*
******************************************************************************/
/* Write the accumulated atoms distribution 'atdist' (one density channel
   per atom type) to "<outfilename>.atdist" as a binary array preceded by
   an IMD-style text header.  Only rank 0 writes. */
void write_atdist()
{
  int size;     /* removed unused locals num, numx, numy, numz, k */
  int i;
  char c;
  str255 fname;
  FILE *out;

  is_big_endian = endian();
  if (myid == 0) {
    sprintf(fname,"%s.atdist",outfilename);
    out = fopen(fname,"w");
    if (NULL == out) error("Cannot open atoms distribution file.");

    /* header: format, contents, dimensions, bin sizes */
    c = is_big_endian ? 'B' : 'L';
    fprintf(out,"#F %c %d 0 %d\n", c, DIM, ntypes);
    fprintf(out,"#C");
    for (i=0; i<ntypes; i++) fprintf(out," density_%d",i);
    fprintf(out,"\n");
#ifdef TWOD
    fprintf(out,"#D %d %d\n", atdist_dim.x, atdist_dim.y);
    fprintf(out,"#S %f %f\n", 1.0 / atdist_scale.x, 1.0 / atdist_scale.y);
#else
    fprintf(out,"#D %d %d %d\n",
            atdist_dim.x, atdist_dim.y, atdist_dim.z);
    fprintf(out,"#S %f %f %f\n",
            1.0 / atdist_scale.x, 1.0 / atdist_scale.y, 1.0 / atdist_scale.z);
#endif
    fprintf(out,"#E\n");

    /* binary payload: all channels in one write */
    size = atdist_size * ntypes;
    if (size!=fwrite(atdist, sizeof(float), size, out))
      error("Cannot write atoms distribution");
    fclose(out);
  }
}
#endif /* ATDIST */
#ifdef DIFFPAT
/******************************************************************************
*
* initialize atoms distribution array
*
******************************************************************************/
/* Allocate and zero the diffraction-pattern arrays and create the FFTW
   real-to-complex plan.  diffdist holds the real-space density on an
   r2c-padded grid (last dimension 2*(nz/2+1) floats); diffpat accumulates
   the squared magnitudes of the transform. */
void init_diffpat()
{
  int size, i;

#ifdef OMP
  /* use all available threads for the FFT */
  fftwf_init_threads();
  fftwf_plan_with_nthreads(omp_get_max_threads());
#endif

#ifdef TIMING
  imd_init_timer( &time_fft,      0, NULL, NULL );
  imd_init_timer( &time_fft_plan, 0, NULL, NULL );
#endif

  /* compute array size; the factor 2*(nz/2+1) is the in-place r2c padding */
  diffpat_size = diffpat_dim.x * diffpat_dim.y * 2 * (diffpat_dim.z / 2 + 1);

  /* diffpat_ll and diffpat_ur must be set (ur.x == 0 means "unset") */
  if (0.0==diffpat_ur.x) {
    error("diffpat_ll and diffpat_ur must be set");
  }

  /* diffpat_weight must be set */
  if (0.0==diffpat_weight[0]) {
    error("diffpat_weight must be set");
  }

  /* the bins are orthogonal boxes in space */
  diffpat_scale.x = diffpat_dim.x / (diffpat_ur.x - diffpat_ll.x);
  diffpat_scale.y = diffpat_dim.y / (diffpat_ur.y - diffpat_ll.y);
  diffpat_scale.z = diffpat_dim.z / (diffpat_ur.z - diffpat_ll.z);

  /* allocate arrays (only on first call); fftwf_malloc gives the alignment
     FFTW needs for SIMD */
  if (NULL==diffdist) {
    diffdist = (float *) fftwf_malloc( diffpat_size * sizeof(float) );
    diffpat  = (float *) malloc( (diffpat_size / 2) * sizeof(float) );
    if ((NULL==diffdist) || (NULL==diffpat))
      error("Cannot allocate diffraction pattern array.");
  }

  /* make fftw plan: spend planning time (FFTW_MEASURE) only if many FFTs
     will be executed, otherwise use the cheap FFTW_ESTIMATE */
#ifdef TIMING
  imd_start_timer(&time_fft_plan);
#endif
  if ((diffpat_end - diffpat_start) % diffpat_int > 50)
    diffpat_plan = fftwf_plan_dft_r2c_3d(
      diffpat_dim.x, diffpat_dim.y, diffpat_dim.z,
      diffdist, (fftwf_complex *) diffdist, FFTW_MEASURE);
  else
    diffpat_plan = fftwf_plan_dft_r2c_3d(
      diffpat_dim.x, diffpat_dim.y, diffpat_dim.z,
      diffdist, (fftwf_complex *) diffdist, FFTW_ESTIMATE);
#ifdef TIMING
  imd_stop_timer(&time_fft_plan);
  printf("Time for FFT plan: %f\n", time_fft_plan.total);
#endif

  /* initialize arrays */
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (i=0; i<diffpat_size; i++) diffdist[i]=0.0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (i=0; i<diffpat_size/2; i++) diffpat [i]=0.0;
}
/******************************************************************************
*
* update atoms distribution array
*
******************************************************************************/
/* Bin the atoms of the selected box into the real-space density grid
   diffdist (weighted per atom type), and every diffpat_int steps run the
   in-place r2c FFT and add the squared magnitudes into diffpat, then clear
   the density grid for the next accumulation interval. */
void update_diffpat(int steps)
{
  int num, numx, numy, numz, k, i;
  real x, y, z;
  int dimz = 2 * (diffpat_dim.z / 2 + 1);  /* padded last dimension (r2c) */
  /* removed unused local dimz2 */
  fftwf_complex *dist_out = (fftwf_complex *) diffdist;

  /* loop over all atoms */
  for (k=0; k<NCELLS; ++k) {
    cell *p;
    p = CELLPTR(k);
    for (i=0; i<p->n; ++i) {
      x = ORT(p,i,X);
      y = ORT(p,i,Y);
      z = ORT(p,i,Z);
      /* continue if atom is not inside selected box */
      if ((x < diffpat_ll.x) || (x > diffpat_ur.x) ||
          (z < diffpat_ll.z) || (z > diffpat_ur.z) ||
          (y < diffpat_ll.y) || (y > diffpat_ur.y)) continue;
      /* which bin?  clamp indices to the valid range */
      numx = diffpat_scale.x * (x - diffpat_ll.x);
      if (numx < 0) numx = 0;
      if (numx >= diffpat_dim.x) numx = diffpat_dim.x-1;
      numy = diffpat_scale.y * (y - diffpat_ll.y);
      if (numy < 0) numy = 0;
      if (numy >= diffpat_dim.y) numy = diffpat_dim.y-1;
      numz = diffpat_scale.z * (z - diffpat_ll.z);
      if (numz < 0) numz = 0;
      if (numz >= diffpat_dim.z) numz = diffpat_dim.z-1;
      num = (numx * diffpat_dim.y + numy) * dimz + numz;
      diffdist[num] += diffpat_weight[ SORTE(p,i) ];
    }
  }

  /* increment diffraction pattern */
  if (0==steps%diffpat_int) {
#ifdef TIMING
    imd_start_timer(&time_fft);
#endif
    fftwf_execute(diffpat_plan);
#ifdef TIMING
    imd_stop_timer(&time_fft);
#endif
    /* accumulate |F|^2 and reset the density grid */
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (i=0; i<diffpat_size/2; i++)
      diffpat[i] += (float)(SQR(dist_out[i][0])+SQR(dist_out[i][1]));
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (i=0; i<diffpat_size; i++) diffdist[i]=0.0;
  }
}
/******************************************************************************
*
* write diffraction pattern
*
******************************************************************************/
/* Write the accumulated diffraction pattern to "<outfilename>.diffpat" as
   a binary array with an IMD-style header.  The stored grid is the r2c
   output: x*y*(z/2+1) complex magnitudes; the #S line gives the reciprocal
   bin sizes.  Only rank 0 writes. */
void write_diffpat()
{
  int len;                            /* removed unused num, numx, numy, numz */
  int dimz2 = diffpat_dim.z / 2 + 1;  /* r2c output size in z */
  real pi, ddx, ddy, ddz;
  char c;
  str255 fname;
  FILE *out;

  if (myid == 0) {
    /* open file */
    sprintf(fname,"%s.diffpat",outfilename);
    if (NULL==(out=fopen(fname,"w"))) error("Cannot open output file");

    /* reciprocal-space bin sizes */
    pi  = 4 * atan( (double) 1.0 );
    ddx = 2 * pi * diffpat_scale.x / diffpat_dim.x;
    ddy = 2 * pi * diffpat_scale.y / diffpat_dim.y;
    ddz = 2 * pi * diffpat_scale.z / diffpat_dim.z;

    /* write file header */
    if (endian()) c='B'; else c='L';
    fprintf(out, "#F %c 3 0 1\n", c );
    fprintf(out, "#C Fourier\n");
    fprintf(out, "#D %d %d %d\n", diffpat_dim.x, diffpat_dim.y, dimz2);
    fprintf(out, "#S %e %e %e\n", ddx, ddy, ddz);
    fprintf(out, "#E\n");

    /* write data */
    len = diffpat_size / 2;
    if (len!=fwrite(diffpat, sizeof(float), len, out))
      error("Cannot write distribution");
    fclose(out);
#ifdef TIMING
    printf("Time for FFT: %f\n", time_fft.total);
#endif
  }
}
#endif /* DIFFPAT */
|
omp-taskloop-yield.c | #include <omp.h>
#include <unistd.h>
#include <stdio.h>
#define THREADS 2
#define LEN 25
#define USLEEP 3
#define SLEEP 30
/* A deliberately long-running task: 1000 rounds of yielding to other
   ready OpenMP tasks followed by a short sleep of USLEEP microseconds. */
void long_task(void)
{
    int round = 0;
    while (round < 1000)
    {
        /* give the runtime a chance to schedule a different task */
        #pragma omp taskyield
        usleep(USLEEP);
        ++round;
    }
}
/* Spawn THREADS threads; one thread creates LEN single-iteration tasks via
   taskloop (grainsize 1).  The first THREADS iterations run long_task (which
   yields repeatedly), the rest just sleep — exercising taskyield behavior
   when long tasks coexist with many short ones. */
int main(void)
{
    int j=0;
    #pragma omp parallel num_threads(THREADS)
    #pragma omp single
    #pragma omp taskloop grainsize(1)
    for (j=0; j<LEN; j++)
    {
        if (j<THREADS)
        {
            long_task();
        } else {
            usleep(SLEEP);
        }
    }
    return 0;
}
|
temp.c |
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define IF if (v[i] > max)
#define NUM_EXEC 5
/* Fill v[0..N) with pseudo-random values.  The seed is fixed so every run
   (and every thread count) benchmarks identical data. */
static void populate_vector(size_t N, int v[])
{
    size_t idx;
    srand(1337);
    for (idx = 0; idx < N; ++idx)
        v[idx] = rand();
}
/* Time a parallel max-reduction over v[0..N) with T threads, where the
   shared maximum is updated under an OpenMP lock.  The file-scope macro IF
   ("if (v[i] > max)") is repeated six times, producing six nested identical
   unlocked tests before the lock is taken — a test-and-test-and-set style
   guard whose contention cost this benchmark measures.  Reading 'max'
   outside the lock is an intentional benign race; the value is re-checked
   under the lock.  Returns the elapsed wall time in seconds. */
static double contention_test(size_t N, int T, const int v[])
{
    size_t i;
    int max = -1;
    double t0, t1;
    omp_lock_t mutex;
    omp_init_lock(&mutex);

    t0 = omp_get_wtime();
    #pragma omp parallel for private(i) num_threads(T)
    for (i = 0; i < N; ++i)
    {
        /*--*/
        IF IF IF IF IF IF
        /*--*/
        {
            omp_set_lock(&mutex);
            {
                if (v[i] > max)
                    max = v[i];
            }
            omp_unset_lock(&mutex);
        }
    }
    t1 = omp_get_wtime();

    /* FIX: the lock was initialized but never destroyed */
    omp_destroy_lock(&mutex);

    return (t1-t0);
}
/* Arithmetic mean of the first n entries of v. */
static double avg(int n, const double v[])
{
    double total = 0.;
    int idx = 0;
    while (idx < n)
    {
        total += v[idx];
        ++idx;
    }
    return total / n;
}
/* Entry point: run the lock-contention benchmark NUM_EXEC times on a
   random vector of <vector_size> ints with <number_of_threads> threads,
   and print the average elapsed time.  The third argument selects the
   output separator: 9 ends the line, anything else emits a trailing comma
   (so several invocations form one CSV row). */
int main(int argc, char* argv[])
{
    static double times[NUM_EXEC];
    int* vector = NULL;
    size_t N;
    int T, i;

    if (argc != 4)
    {
        /* FIX: the usage line omitted the required third argument */
        fprintf(stdout, "Usage: %s <vector_size> <number_of_threads> <run_index>\n ", argv[0]);
        return 1;
    }

    N = atoll(argv[1]);
    T = atoi(argv[2]);
    int last = atoi(argv[3]) == 9;   /* 9 marks the final run in a CSV row */

    vector = (int*) malloc(N*sizeof(int));
    if (!vector)
    {
        fprintf(stdout, "Failed to allocate memory. Exiting...\n");
        return 2;
    }
    populate_vector(N, vector);

    /* throw away first execution (warm-up); the measuring loop below
       overwrites times[0] with a fresh run */
    times[0] = contention_test(N, T, vector);
    for (i = 0; i < NUM_EXEC; ++i)
        times[i] = contention_test(N, T, vector);

    if(!last)
        fprintf(stdout, "%lf,",avg(NUM_EXEC, times));
    else
        fprintf(stdout, "%lf\n",avg(NUM_EXEC, times));

    free(vector);
    return 0;
}
|
mlp_openmp.c | /**
* @file app.c
* @brief Template for a Host Application Source File.
*
*/
#include <assert.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "../../support/common.h"
#include "../../support/timer.h"
#include "shared.hpp"
T **A;
T *B;
T *C;
// Create input arrays
/* Populate the benchmark inputs: every layer matrix A[l] is mostly zero
   (98 of each 100 entries), with the rest alternating 0/1 by parity of
   l+i; the input vector B is mostly zero (48 of each 50), the rest
   alternating by parity of i. */
static void init_data(T **A, T *B, unsigned int m_size, unsigned int n_size) {
  unsigned int layer, idx;

  for (layer = 0; layer < NUM_LAYERS; layer++) {
    for (idx = 0; idx < m_size * n_size; idx++) {
      A[layer][idx] = (idx % 100 < 98) ? 0 : (layer + idx) % 2;
    }
  }

  for (idx = 0; idx < n_size; idx++) {
    B[idx] = (idx % 50 < 48) ? 0 : idx % 2;
  }
}
// Compute output in the host
static void mlp_host(T *C, T **A, T *B, unsigned int m_size, unsigned int n_size) {
for (unsigned int nl = 0; nl < NUM_LAYERS; nl++) {
for (unsigned int m = 0; m < m_size; m++) {
C[m] = 0;
}
#pragma omp parallel for
for (unsigned int m = 0; m < m_size; m++) {
for (unsigned int n = 0; n < n_size; n++) {
C[m] += A[nl][m * n_size + n] * B[n];
}
C[m] = max(0, C[m]);
}
for (unsigned int n = 0; n < n_size; n++) {
B[n] = C[n];
}
}
}
/* Sum the first n_size entries of the global activation vector B (used as
   a cheap checksum of the MLP result).  The m_size parameter is accepted
   for signature compatibility but not used. */
static uint64_t mlp_host_sum(uint64_t n_size, uint64_t m_size) {
  uint64_t total = 0;
  uint64_t idx = 0;
  while (idx < n_size) {
    total += B[idx];
    ++idx;
  }
  return total;
}
// Params ---------------------------------------------------------------------
typedef struct Params {
char *dpu_type;
int nr_of_ranks;
int input_size_n;
int input_size_m;
int n_warmup;
int n_reps;
} Params;
/* Print the command-line help text to stderr. */
void usage() {
  fprintf(stderr,
          "\nUsage:  ./program [options]"
          "\n"
          "\nGeneral options:"
          "\n    -h        help"
          "\n    -d <D>    DPU type (default=fsim)"
          "\n    -r <R>    # of ranks (default=2)"
          "\n"
          "\nBenchmark-specific options:"
          "\n    -i <I>    input size (default=8M elements)"
          "\n");
}
/* Parse command-line options into a Params struct (see usage()).
   Defaults: fsim backend, 1 rank, 512x512 input, 2 warm-ups, 3 reps.
   BUG FIX: the optstring was "hd:r:i:" — the 'n' and 'm' cases below were
   unreachable, and '-i' (documented in usage) had no case and fell through
   to the "Unrecognized option" branch.  Now -n/-m set the individual
   dimensions and -i sets both. */
struct Params input_params(int argc, char **argv) {
  struct Params p;
  p.dpu_type = "fsim";
  p.nr_of_ranks = 1;
  p.input_size_n = 1 << 9;
  p.input_size_m = 1 << 9;
  p.n_warmup = 2;
  p.n_reps = 3;

  int opt;
  while ((opt = getopt(argc, argv, "hd:r:i:n:m:")) >= 0) {
    switch (opt) {
    case 'h':
      usage();
      exit(0);
      break;
    case 'd':
      p.dpu_type = optarg;
      break;
    case 'r':
      p.nr_of_ranks = atoi(optarg);
      break;
    case 'i':
      /* -i sets both dimensions to the given input size */
      p.input_size_n = atoi(optarg);
      p.input_size_m = atoi(optarg);
      break;
    case 'n':
      p.input_size_n = atoi(optarg);
      break;
    case 'm':
      p.input_size_m = atoi(optarg);
      break;
    default:
      fprintf(stderr, "\nUnrecognized option!\n");
      usage();
      exit(0);
    }
  }
  assert(p.nr_of_ranks > 0 && "Invalid # of ranks!");

  return p;
}
/**
 * @brief Main of the Host Application.
 *
 * Allocates the layer weights A, input B and output C, runs the host MLP
 * forward pass and prints the kernel time and the checksum of the result.
 *
 * NOTE(review): several argument orders look inconsistent and should be
 * confirmed: mlp_host(C, A, B, n_size, m_size) binds n_size to the m_size
 * parameter (the products agree, so there is no out-of-bounds access, but
 * the names are swapped); init_data(A, B, m_size, n_size) initializes only
 * n_size entries of B while mlp_host reads m_size of them; the uint64_t
 * sum is narrowed into a uint32_t; and the allocations use
 * sizeof(unsigned int) where sizeof(T) is presumably intended.
 */
int main(int argc, char **argv) {
  struct Params p = input_params(argc, argv);   /* parsed but sizes are fixed below */

  uint64_t n_size = 8192;
  uint64_t m_size = 20480;

  Timer timer;
  /* NUM_LAYERS weight matrices plus the input/output vectors */
  A = malloc(NUM_LAYERS * sizeof(T *));
  for (int l = 0; l < NUM_LAYERS; l++)
    A[l] = malloc(n_size * m_size * sizeof(unsigned int));
  B = malloc(m_size * sizeof(unsigned int));
  C = malloc(m_size * sizeof(unsigned int));

  // Create an input file with arbitrary data.
  init_data(A, B, m_size, n_size);

  /* time only the forward pass */
  start(&timer, 0, 1);
  start_region();
  mlp_host(C, A, B, n_size, m_size);
  end_region();
  stop(&timer, 0);

  uint32_t sum = mlp_host_sum(n_size, m_size);

  printf("Kernel ");
  print(&timer, 0, 1);
  printf("\n");

  printf("SUM = %d \n", sum);

  for (int l = 0; l < NUM_LAYERS; l++)
    free(A[l]);
  free(A);
  free(B);
  free(C);

  return 0;
}
|
bucle-forModificado.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Run a parallel loop of n iterations (n taken from argv[1]) and report,
   for each iteration, which OpenMP thread executed it. */
int main(int argc, char **argv) {
    /* the iteration count is a required argument */
    if (argc < 2) {
        fprintf(stderr,"\n[ERROR] - Falta no iteraciones \n");
        exit(-1);
    }

    int n = atoi(argv[1]);
    int i;

    #pragma omp parallel for
    for (i = 0; i < n; i++)
        printf("thread %d ejecuta la iteración %d del bucle\n", omp_get_thread_num(), i);

    return 0;
}
|
fold.c | /*
* minimum free energy
* RNA secondary structure prediction
*
* c Ivo Hofacker, Chrisoph Flamm
* original implementation by
* Walter Fontana
* g-quadruplex support and threadsafety
* by Ronny Lorenz
*
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include <limits.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/datastructures/basic.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/fold.h"
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
#ifdef _OPENMP
#include <omp.h>
#endif
#endif
#define MAXSECTORS 500 /* dimension for a backtrack array */
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
/* some backward compatibility stuff */
PRIVATE int backward_compat = 0;
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
#ifdef _OPENMP
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
/* wrappers for old API compatibility */
PRIVATE float
wrap_fold(const char *string,
char *structure,
vrna_param_t *parameters,
int is_constrained,
int is_circular);
PRIVATE void
wrap_array_export(int **f5_p,
int **c_p,
int **fML_p,
int **fM1_p,
int **indx_p,
char **ptype_p);
PRIVATE void
wrap_array_export_circ(int *Fc_p,
int *FcH_p,
int *FcI_p,
int *FcM_p,
int **fM2_p);
#endif
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/*
* ###########################################
* # deprecated functions below #
*###########################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
/* Export pointers to the DP matrices of the cached backward-compatibility
   fold compound (f5, c, fML, fM1, the j-index table and the pair-type
   matrix) so deprecated routines such as subopt() can read them.  Does
   nothing if no compound has been created yet. */
PRIVATE void
wrap_array_export(int   **f5_p,
                  int   **c_p,
                  int   **fML_p,
                  int   **fM1_p,
                  int   **indx_p,
                  char  **ptype_p)
{
  /* make the DP arrays available to routines such as subopt() */
  if (backward_compat_compound) {
    *f5_p     = backward_compat_compound->matrices->f5;
    *c_p      = backward_compat_compound->matrices->c;
    *fML_p    = backward_compat_compound->matrices->fML;
    *fM1_p    = backward_compat_compound->matrices->fM1;
    *indx_p   = backward_compat_compound->jindx;
    *ptype_p  = backward_compat_compound->ptype;
  }
}
/* Export the circular-folding DP values (Fc and its hairpin/interior/
   multiloop decomposition plus the fM2 array) of the cached
   backward-compatibility fold compound.  Does nothing if no compound has
   been created yet. */
PRIVATE void
wrap_array_export_circ(int  *Fc_p,
                       int  *FcH_p,
                       int  *FcI_p,
                       int  *FcM_p,
                       int  **fM2_p)
{
  /* make the DP arrays available to routines such as subopt() */
  if (backward_compat_compound) {
    *Fc_p   = backward_compat_compound->matrices->Fc;
    *FcH_p  = backward_compat_compound->matrices->FcH;
    *FcI_p  = backward_compat_compound->matrices->FcI;
    *FcM_p  = backward_compat_compound->matrices->FcM;
    *fM2_p  = backward_compat_compound->matrices->fM2;
  }
}
/* Core of the deprecated fold()/circfold()/fold_par() interface: build a
   vrna_fold_compound_t for 'string', optionally install caller-supplied
   energy parameters and dot-bracket constraints, compute the MFE, and —
   unless backtracking is disabled in the model details — write the
   structure into 'structure' and replace the global base_pair list.
   The compound is cached in backward_compat_compound for the other
   deprecated accessors.  Returns the minimum free energy. */
PRIVATE float
wrap_fold(const char    *string,
          char          *structure,
          vrna_param_t  *parameters,
          int           is_constrained,
          int           is_circular)
{
  vrna_fold_compound_t  *vc;
  vrna_param_t          *P;
  float                 mfe;

#ifdef _OPENMP
  /* Explicitly turn off dynamic threads */
  omp_set_dynamic(0);
#endif

  /* we need the parameter structure for hard constraints */
  if (parameters) {
    P = vrna_params_copy(parameters);
  } else {
    vrna_md_t md;
    set_model_details(&md);
    md.temperature = temperature;
    P = vrna_params(&md);
  }

  P->model_details.circ = is_circular;

  vc = vrna_fold_compound(string, &(P->model_details), VRNA_OPTION_DEFAULT);

  if (parameters) {
    /* replace params if necessary */
    free(vc->params);
    vc->params = P;
  } else {
    /* NOTE(review): P is freed here — presumably vrna_fold_compound made
       its own copy of the model details/parameters; confirm ownership */
    free(P);
  }

  /* handle hard constraints in pseudo dot-bracket format if passed via simple interface */
  if (is_constrained && structure) {
    unsigned int constraint_options = 0;
    constraint_options |= VRNA_CONSTRAINT_DB
                          | VRNA_CONSTRAINT_DB_PIPE
                          | VRNA_CONSTRAINT_DB_DOT
                          | VRNA_CONSTRAINT_DB_X
                          | VRNA_CONSTRAINT_DB_ANG_BRACK
                          | VRNA_CONSTRAINT_DB_RND_BRACK;
    vrna_constraints_add(vc, (const char *)structure, constraint_options);
  }

  /* replace the cached compound of the deprecated interface */
  if (backward_compat_compound && backward_compat)
    vrna_fold_compound_free(backward_compat_compound);

  backward_compat_compound  = vc;
  backward_compat           = 1;

  /* call mfe() function without backtracking */
  mfe = vrna_mfe(vc, NULL);

  /* backtrack structure */
  if (structure && vc->params->model_details.backtrack) {
    char            *ss;
    int             length;
    sect            bt_stack[MAXSECTORS];
    vrna_bp_stack_t *bp;

    length = vc->length;

    /* add a guess of how many G's may be involved in a G quadruplex */
    bp = (vrna_bp_stack_t *)vrna_alloc(sizeof(vrna_bp_stack_t) * (4 * (1 + length / 2)));

    vrna_backtrack_from_intervals(vc, bp, bt_stack, 0);

    ss = vrna_db_from_bp_stack(bp, length);
    strncpy(structure, ss, length + 1);
    free(ss);

    /* transfer ownership of the base-pair list to the deprecated global */
    if (base_pair)
      free(base_pair);

    base_pair = bp;
  }

  return mfe;
}
/* Release the fold compound cached by the deprecated interface, if any,
   and reset the backward-compatibility state. */
PUBLIC void
free_arrays(void)
{
  if (!(backward_compat_compound && backward_compat))
    return;

  vrna_fold_compound_free(backward_compat_compound);
  backward_compat_compound  = NULL;
  backward_compat           = 0;
}
/* Deprecated API: MFE fold with explicit parameters, constraint flag and
   circularity flag; thin wrapper around wrap_fold().  Returns the MFE and
   writes the structure into 'structure'. */
PUBLIC float
fold_par(const char    *string,
         char          *structure,
         vrna_param_t  *parameters,
         int           is_constrained,
         int           is_circular)
{
  return wrap_fold(string, structure, parameters, is_constrained, is_circular);
}
/* Deprecated API: MFE fold of a linear sequence with default parameters;
   constraint handling is controlled by the global fold_constrained. */
PUBLIC float
fold(const char *string,
     char       *structure)
{
  return wrap_fold(string, structure, NULL, fold_constrained, 0);
}
/* Deprecated API: MFE fold of a circular sequence with default parameters;
   constraint handling is controlled by the global fold_constrained. */
PUBLIC float
circfold(const char  *string,
         char        *structure)
{
  return wrap_fold(string, structure, NULL, fold_constrained, 1);
}
/* Deprecated no-op kept for API compatibility; DP arrays are managed by the
 * fold compound nowadays. */
PUBLIC void
initialize_fold(int length)
{
  (void)length; /* intentionally unused */
}
/* Deprecated interface: re-derive the energy parameters of the globally
 * stored fold compound from the current global model settings. */
PUBLIC void
update_fold_params(void)
{
  if (backward_compat_compound && backward_compat) {
    vrna_md_t md;
    set_model_details(&md);
    vrna_params_reset(backward_compat_compound, &md);
  }
}
/* Deprecated interface: replace the parameters of the globally stored fold
 * compound; with a NULL argument fall back to the global model settings. */
PUBLIC void
update_fold_params_par(vrna_param_t *parameters)
{
  if (!(backward_compat_compound && backward_compat))
    return;

  if (parameters) {
    /* caller supplied a complete parameter set: substitute it directly */
    vrna_params_subst(backward_compat_compound, parameters);
  } else {
    /* otherwise rebuild the parameters from the global model details */
    vrna_md_t md;
    set_model_details(&md);
    vrna_params_reset(backward_compat_compound, &md);
  }
}
/* Deprecated interface: expose pointers to the internal DP matrices
 * (f5, c, fML, fM1), the indexing array and the pair-type matrix of the
 * globally stored fold compound. */
PUBLIC void
export_fold_arrays(int  **f5_p,
                   int  **c_p,
                   int  **fML_p,
                   int  **fM1_p,
                   int  **indx_p,
                   char **ptype_p)
{
  wrap_array_export(f5_p, c_p, fML_p, fM1_p, indx_p, ptype_p);
}
/* Deprecated interface: same as export_fold_arrays() but additionally hands
 * out the energy parameter set of the globally stored fold compound. */
PUBLIC void
export_fold_arrays_par(int          **f5_p,
                       int          **c_p,
                       int          **fML_p,
                       int          **fM1_p,
                       int          **indx_p,
                       char         **ptype_p,
                       vrna_param_t **P_p)
{
  wrap_array_export(f5_p, c_p, fML_p, fM1_p, indx_p, ptype_p);
  /* only filled in when a previous fold() call left a compound behind */
  if (backward_compat_compound)
    *P_p = backward_compat_compound->params;
}
/* Deprecated interface: expose the linear-fold DP matrices plus the
 * circular-fold specific values (Fc and its hairpin/interior/multiloop
 * decomposition, and the fM2 array). */
PUBLIC void
export_circfold_arrays(int  *Fc_p,
                       int  *FcH_p,
                       int  *FcI_p,
                       int  *FcM_p,
                       int  **fM2_p,
                       int  **f5_p,
                       int  **c_p,
                       int  **fML_p,
                       int  **fM1_p,
                       int  **indx_p,
                       char **ptype_p)
{
  wrap_array_export(f5_p, c_p, fML_p, fM1_p, indx_p, ptype_p);
  wrap_array_export_circ(Fc_p, FcH_p, FcI_p, FcM_p, fM2_p);
}
/* Deprecated interface: same as export_circfold_arrays() but additionally
 * hands out the energy parameter set of the globally stored fold compound. */
PUBLIC void
export_circfold_arrays_par(int          *Fc_p,
                           int          *FcH_p,
                           int          *FcI_p,
                           int          *FcM_p,
                           int          **fM2_p,
                           int          **f5_p,
                           int          **c_p,
                           int          **fML_p,
                           int          **fM1_p,
                           int          **indx_p,
                           char         **ptype_p,
                           vrna_param_t **P_p)
{
  wrap_array_export(f5_p, c_p, fML_p, fM1_p, indx_p, ptype_p);
  wrap_array_export_circ(Fc_p, FcH_p, FcI_p, FcM_p, fM2_p);
  /* only filled in when a previous fold() call left a compound behind */
  if (backward_compat_compound)
    *P_p = backward_compat_compound->params;
}
/* Deprecated interface: backtrack a secondary structure for the interval
 * [i,j], assuming (i,j) forms a base pair, using the fold compound stored
 * globally by the last fold()/circfold() call.
 * Returns a newly allocated dot-bracket string, or NULL on error.
 * Side effect: the global base_pair list is replaced. */
PUBLIC char *
backtrack_fold_from_pair(char *sequence,
                         int  i,
                         int  j)
{
  char            *structure  = NULL;
  unsigned int    length      = 0;
  vrna_bp_stack_t *bp         = NULL;
  sect            bt_stack[MAXSECTORS]; /* stack of partial structures for backtracking */

  if (!sequence) {
    vrna_message_warning("backtrack_fold_from_pair: "
                         "no sequence given");
    return NULL;
  }

  /* FIX: guard against a missing global fold compound; the original
   * dereferenced it unconditionally and crashed when no fold() call
   * preceded this one */
  if (!backward_compat_compound) {
    vrna_message_warning("backtrack_fold_from_pair: "
                         "no fold compound available, call fold() first");
    return NULL;
  }

  length  = strlen(sequence);
  bp      = (vrna_bp_stack_t *)vrna_alloc(sizeof(vrna_bp_stack_t) * (1 + length / 2));

  /* seed the backtracking stack with the single interval enclosed by (i,j);
   * ml == 2 marks it as enclosed by a pair */
  bt_stack[1].i   = i;
  bt_stack[1].j   = j;
  bt_stack[1].ml  = 2;

  bp[0].i = 0; /* ??? this is set by backtrack anyway... */

  vrna_backtrack_from_intervals(backward_compat_compound, bp, bt_stack, 1);

  structure = vrna_db_from_bp_stack(bp, length);

  /* backward compatibility: publish the pair list via the global variable */
  if (base_pair)
    free(base_pair);

  base_pair = bp;

  return structure;
}
#define STACK_BULGE1 1 /* stacking energies for bulges of size 1 */
#define NEW_NINIO 1 /* new asymmetry penalty */

/* Deprecated interface: free energy of a hairpin loop.
 * size    : number of unpaired bases in the loop
 * type    : pair type of the closing pair (types > 2 are non-CG/GC pairs)
 * si1,sj1 : encoded mismatch bases adjacent to the closing pair
 * string  : sequence window starting at the closing pair, used for the
 *           tabulated tri-/tetra-/hexaloop bonus lookup
 * Uses the parameters of the globally stored fold compound; assumes a
 * prior fold() call (not NULL-checked here, as in the original API). */
PUBLIC int
HairpinE(int        size,
         int        type,
         int        si1,
         int        sj1,
         const char *string)
{
  vrna_param_t *P = backward_compat_compound->params;
  int energy;

  /* length-dependent term; extrapolated logarithmically beyond size 30 */
  energy = (size <= 30) ? P->hairpin[size] :
           P->hairpin[30] + (int)(P->lxc * log((size) / 30.));

  if (tetra_loop) {
    if (size == 4) {
      /* check for tetraloop bonus */
      char tl[7] = {
        0
      }, *ts;
      strncpy(tl, string, 6);
      if ((ts = strstr(P->Tetraloops, tl)))
        return P->Tetraloop_E[(ts - P->Tetraloops) / 7];
    }

    if (size == 6) {
      /* tabulated hexaloop bonus */
      char tl[9] = {
        0
      }, *ts;
      strncpy(tl, string, 8);
      if ((ts = strstr(P->Hexaloops, tl)))
        return energy = P->Hexaloop_E[(ts - P->Hexaloops) / 9];
    }

    if (size == 3) {
      /* triloops: tabulated bonus if listed, otherwise only a terminal
       * AU/GU penalty — no mismatch term for size-3 loops */
      char tl[6] = {
        0, 0, 0, 0, 0, 0
      }, *ts;
      strncpy(tl, string, 5);
      if ((ts = strstr(P->Triloops, tl)))
        return P->Triloop_E[(ts - P->Triloops) / 6];

      if (type > 2) /* neither CG nor GC */
        energy += P->TerminalAU; /* penalty for closing AU/GU pair
                                  * (original German note asked: are these
                                  * bonuses or penalties now — sign?) */

      return energy;
    }
  }

  /* remaining cases: add the closing-pair mismatch contribution */
  energy += P->mismatchH[type][si1][sj1];

  return energy;
}
/*---------------------------------------------------------------------------*/
/* Deprecated interface: energy of a degree-2 loop (stack, bulge or interior
 * loop) closed by pairs (i,j) and (p,q), given the pair types of both
 * closing pairs. Uses the parameters and the encoded sequence of the
 * globally stored fold compound (assumes a prior fold() call). */
PUBLIC int
oldLoopEnergy(int i,
              int j,
              int p,
              int q,
              int type,
              int type_2)
{
  vrna_param_t  *P  = backward_compat_compound->params;
  short         *S1 = backward_compat_compound->sequence_encoding;

  /* compute energy of degree 2 loop (stack bulge or interior) */
  int n1, n2, m, energy;

  /* unpaired strand lengths on either side of the loop */
  n1  = p - i - 1;
  n2  = j - q - 1;

  if (n1 > n2) {
    m   = n1;
    n1  = n2;
    n2  = m;
  } /* so that n2>=n1 */

  if (n2 == 0) {
    energy = P->stack[type][type_2]; /* stack */
  } else if (n1 == 0) {
    /* bulge; logarithmic extrapolation beyond MAXLOOP */
    energy = (n2 <= MAXLOOP) ? P->bulge[n2] :
             (P->bulge[30] + (int)(P->lxc * log(n2 / 30.)));

#if STACK_BULGE1
    /* size-1 bulges retain the stacking energy of the flanking pairs */
    if (n2 == 1)
      energy += P->stack[type][type_2];

#endif
  } else {
    /* interior loop */
    if ((n1 + n2 == 2) && (james_rule)) {
      /* special case for loop size 2: tabulated 1x1 interior loop */
      energy = P->int11[type][type_2][S1[i + 1]][S1[j - 1]];
    } else {
      energy = (n1 + n2 <= MAXLOOP) ? (P->internal_loop[n1 + n2]) :
               (P->internal_loop[30] + (int)(P->lxc * log((n1 + n2) / 30.)));
#if NEW_NINIO
      /* asymmetry penalty, capped at MAX_NINIO */
      energy += MIN2(MAX_NINIO, (n2 - n1) * P->ninio[2]);
#else
      m       = MIN2(4, n1);
      energy  += MIN2(MAX_NINIO, ((n2 - n1) * P->ninio[m]));
#endif
      /* terminal mismatches of both closing pairs */
      energy += P->mismatchI[type][S1[i + 1]][S1[j - 1]] +
                P->mismatchI[type_2][S1[q + 1]][S1[p - 1]];
    }
  }

  return energy;
}
/*--------------------------------------------------------------------------*/
/* Deprecated interface: energy of a degree-2 loop given the two unpaired
 * strand lengths n1/n2, the pair types of both closing pairs and the four
 * encoded mismatch bases. Uses the parameters of the globally stored fold
 * compound (assumes a prior fold() call). */
PUBLIC int
LoopEnergy(int n1,
           int n2,
           int type,
           int type_2,
           int si1,
           int sj1,
           int sp1,
           int sq1)
{
  vrna_param_t *P = backward_compat_compound->params;

  /* compute energy of degree 2 loop (stack bulge or interior) */
  int nl, ns, energy;

  /* nl = larger, ns = smaller of the two strand lengths */
  if (n1 > n2) {
    nl  = n1;
    ns  = n2;
  } else {
    nl  = n2;
    ns  = n1;
  }

  if (nl == 0)
    return P->stack[type][type_2]; /* stack */

  if (ns == 0) {
    /* bulge; logarithmic extrapolation beyond MAXLOOP */
    energy = (nl <= MAXLOOP) ? P->bulge[nl] :
             (P->bulge[30] + (int)(P->lxc * log(nl / 30.)));
    if (nl == 1) {
      /* size-1 bulges retain the stacking energy of the flanking pairs */
      energy += P->stack[type][type_2];
    } else {
      /* larger bulges: terminal AU/GU penalty for each closing pair */
      if (type > 2)
        energy += P->TerminalAU;

      if (type_2 > 2)
        energy += P->TerminalAU;
    }

    return energy;
  } else {
    /* interior loop */
    if (ns == 1) {
      if (nl == 1) /* 1x1 loop */
        return P->int11[type][type_2][si1][sj1];

      if (nl == 2) {
        /* 2x1 loop — the tabulated 2x1 values are asymmetric, so the
         * lookup depends on which side holds the single base */
        if (n1 == 1)
          energy = P->int21[type][type_2][si1][sq1][sj1];
        else
          energy = P->int21[type_2][type][sq1][si1][sp1];

        return energy;
      } else {
        /* 1xn loop */
        energy = (nl + 1 <= MAXLOOP) ? (P->internal_loop[nl + 1]) :
                 (P->internal_loop[30] + (int)(P->lxc * log((nl + 1) / 30.)));
        energy  += MIN2(MAX_NINIO, (nl - ns) * P->ninio[2]);
        energy  += P->mismatch1nI[type][si1][sj1] +
                   P->mismatch1nI[type_2][sq1][sp1];
        return energy;
      }
    } else if (ns == 2) {
      if (nl == 2) {
        /* 2x2 loop */
        return P->int22[type][type_2][si1][sp1][sq1][sj1];
      } else if (nl == 3) {
        /* 2x3 loop */
        energy  = P->internal_loop[5] + P->ninio[2];
        energy  += P->mismatch23I[type][si1][sj1] +
                   P->mismatch23I[type_2][sq1][sp1];
        return energy;
      }
    }

    {
      /* generic interior loop (no else here!)*/
      energy = (n1 + n2 <= MAXLOOP) ? (P->internal_loop[n1 + n2]) :
               (P->internal_loop[30] + (int)(P->lxc * log((n1 + n2) / 30.)));
      energy  += MIN2(MAX_NINIO, (nl - ns) * P->ninio[2]);
      energy  += P->mismatchI[type][si1][sj1] +
                 P->mismatchI[type_2][sq1][sp1];
    }
  }

  return energy;
}
#endif
|
commondraw.c | /********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : commondraw.c
* Description : common drawing
*
* + This is part of libaroma, an embedded ui toolkit.
* + 06/04/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_commondraw_c__
#define __libaroma_commondraw_c__
#include <aroma_internal.h>
#include <math.h> /* fabsf, cos, sin, sqrt, floor, ceil, atan2 */
#ifdef __cplusplus
extern "C" {
#endif
/*
* Function : libaroma_draw_limit
* Return Value: int
* Descriptions: get limit position
*/
int libaroma_draw_limit(
    int x, int max) {
  /* clamp a coordinate into the valid index range [0, max-1] */
  if (x < 0) {
    return 0;
  }
  return (x >= max) ? (max - 1) : x;
} /* End of libaroma_draw_limit */
/*
* Function : libaroma_draw_limited
* Return Value: byte
* Descriptions: is draw position limited/overflow
*/
byte libaroma_draw_limited(
    int x, int max) {
  /* 1 when the coordinate lies outside [0, max-1], 0 otherwise */
  if ((x >= 0) && (x < max)) {
    return 0;
  }
  return 1;
} /* End of libaroma_draw_limited */
/*
 * Function    : libaroma_draw_ex2
 * Return Value: byte (1 = ok / nothing to do, 0 = invalid arguments)
 * Descriptions: canvas drawing - copy/blend a sub-rectangle of src
 *               (sx,sy,sw,sh) onto dst at (dx,dy), honoring the alpha
 *               channel, opacity, dithering and mask flags. Source and
 *               destination rectangles are clipped to both canvases.
 *               NOTE(review): the log messages below still say
 *               "libaroma_draw_ex1" - presumably a leftover from an older
 *               function name.
 */
byte libaroma_draw_ex2(
  LIBAROMA_CANVASP dst,
  LIBAROMA_CANVASP src,
  int dx, int dy,
  int sx, int sy,
  int sw, int sh,
  byte draw_flags,
  byte opacity,
  byte ismask,
  word maskcolor
) {
  if (src == NULL) {
    ALOGW("libaroma_draw_ex1 src = NULL");
    return 0;
  }
  if (dst == NULL) {
    /* default target: the framebuffer canvas */
    dst = libaroma_fb()->canvas;
  }
  if ((dx >= dst->w) || (dy >= dst->h)) {
    ALOGW("libaroma_draw_ex1 dx/dy bigger that destination size");
    return 0;
  }
  if (opacity==0) {
    return 1; /* No Need Any Process */
  }
  /* decode draw flags */
  byte useAlpha = (draw_flags&LIBAROMA_DRAW_WITH_ALPHA)?1:0;
  byte noDither = (draw_flags&LIBAROMA_DRAW_NODITHER)?1:0;
  byte toBlack  = (draw_flags&LIBAROMA_DRAW_TO_BLACK)?1:0;
  /* fix positions - clip the source rect against the source origin,
   * shifting the destination by the clipped amount */
  if (sx < 0) {
    dx += abs(sx);
    sw -= abs(sx);
    sx = 0;
  }
  if (sy < 0) {
    dy += abs(sy);
    sh -= abs(sy);
    sy = 0;
  }
  /* fix size - clip the source rect against the source extents */
  if (sw + sx >= src->w) {
    sw -= (sw + sx) - src->w;
  }
  if (sh + sy >= src->h) {
    sh -= (sh + sy) - src->h;
  }
  if ((sw <= 0) || (sh <= 0)) {
    ALOGW("libaroma_draw_ex1 calculated sw/sh < 1");
    return 0;
  }
  /* set calculated units */
  int sr_w = sw;
  int sr_h = sh;
  int sr_x = sx;
  int sr_y = sy;
  int ds_x = dx;
  int ds_y = dy;
  /* fix destination - clip against the destination origin */
  if (dx < 0) {
    int ndx = abs(dx);
    sr_x += abs(ndx);
    sr_w -= ndx;
    ds_x = 0;
  }
  if (dy < 0) {
    int ndy = abs(dy);
    sr_y += ndy;
    sr_h -= ndy;
    ds_y = 0;
  }
  /* fix source size - clip against the destination extents.
   * NOTE(review): these bounds use the (possibly negative) dx/dy rather
   * than the clamped ds_x/ds_y - looks like it could under-clip when
   * dx/dy < 0; confirm against upstream libaroma. */
  if (sr_w + dx > dst->w) {
    sr_w -= (sr_w + dx) - dst->w;
  }
  if (sr_h + dy > dst->h) {
    sr_h -= (sr_h + dy) - dst->h;
  }
  /* prepare loop data - byte offsets (16-bit pixels, hence *2) */
  int y;
  int pos_sr_x = sr_x * 2;
  int pos_ds_x = ds_x * 2;
  int pos_sc_w = src->l * 2;   /* source line pitch in bytes */
  int pos_dc_w = dst->l * 2;   /* destination line pitch in bytes */
  int copy_sz  = sr_w * 2;     /* bytes per copied row */
  byte * src_data = ((byte *) src->data);
  byte * dst_data = ((byte *) dst->data);
  if (useAlpha) {
    /* cannot alpha-blend without a source alpha channel */
    if (src->alpha == NULL) {
      useAlpha = 0;
    }
  }
  if (!useAlpha){
    /* mask drawing requires the alpha channel */
    ismask=0;
  }
  if (opacity == 0xff) {
    /* fully opaque path */
    if (useAlpha) {
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
      for (y = 0; y < sr_h; y++) {
        wordp dst_mem = (wordp) (dst_data+((ds_y + y)*pos_dc_w)+pos_ds_x);
        if (ismask){
          /* draw maskcolor through the source alpha channel */
          libaroma_alpha_mono(
            sr_w, dst_mem, dst_mem, maskcolor,
            (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
          );
        }
        else{
          wordp src_mem = (wordp) (src_data+((sr_y + y)*pos_sc_w)+pos_sr_x);
          if (noDither){
            libaroma_alpha_px(
              sr_w, dst_mem, dst_mem,
              src_mem, (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
            );
          }
          else{
            /* dithered variant takes the row index for the dither pattern */
            libaroma_alpha_px_line(
              y, sr_w, dst_mem, dst_mem,
              src_mem, (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
            );
          }
        }
      }
    }
    else {
      /* Copy Data Directly */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
      for (y = 0; y < sr_h; y++) {
        memcpy(
          dst_data + ((ds_y + y)*pos_dc_w) + pos_ds_x,
          src_data + ((sr_y + y)*pos_sc_w) + pos_sr_x,
          copy_sz
        );
      }
    }
  }
  else {
    /* translucent path: blend through a per-row temporary buffer */
    if (useAlpha) {
      /* Blend Destination with Source */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
      for (y = 0; y < sr_h; y++) {
        wordp tmp_dst = (wordp) malloc(sr_w * 2);
        wordp dst_mem = (wordp) (dst_data + ((ds_y + y) * pos_dc_w) + pos_ds_x);
        if (ismask){
          libaroma_alpha_mono(
            sr_w, tmp_dst, dst_mem, maskcolor,
            (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
          );
          libaroma_alpha_const(
            sr_w, dst_mem, dst_mem, tmp_dst, opacity
          );
        }
        else{
          wordp src_mem = (wordp) (src_data+((sr_y + y)*pos_sc_w)+pos_sr_x);
          if (toBlack){
            /* blend onto black instead of onto the destination content */
            libaroma_alpha_px(
              sr_w, tmp_dst, dst_mem, src_mem,
              (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
            );
            libaroma_alpha_black(sr_w, dst_mem, tmp_dst, opacity);
          }
          else if (noDither){
            libaroma_alpha_px(
              sr_w, tmp_dst, dst_mem, src_mem,
              (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
            );
            libaroma_alpha_const(
              sr_w, dst_mem, dst_mem, tmp_dst, opacity
            );
          }
          else{
            libaroma_alpha_px_line(
              y, sr_w, tmp_dst, dst_mem, src_mem,
              (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
            );
            libaroma_alpha_const_line(
              y, sr_w, dst_mem, dst_mem, tmp_dst, opacity
            );
          }
        }
        free(tmp_dst);
      }
    }
    else {
      /* Blend Data Directly */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
      for (y = 0; y < sr_h; y++) {
        wordp dst_mem = (wordp) (dst_data + ((ds_y + y) * pos_dc_w) + pos_ds_x);
        wordp src_mem = (wordp) (src_data + ((sr_y + y) * pos_sc_w) + pos_sr_x);
        if (toBlack){
          libaroma_alpha_black(sr_w, dst_mem, src_mem, opacity);
        }
        else if (noDither){
          libaroma_alpha_const(
            sr_w, dst_mem, dst_mem, src_mem, opacity
          );
        }
        else{
          libaroma_alpha_const_line(
            y, sr_w, dst_mem, dst_mem, src_mem, opacity
          );
        }
      }
    }
  }
  return 1;
} /* End of libaroma_draw_ex2 */
/*
 * Function    : libaroma_draw_rect
 * Return Value: byte (1 = drawn, 0 = invalid position)
 * Descriptions: draw filled rectangle, clipped to the canvas; alpha==0xff
 *               overwrites, otherwise the color is blended per pixel.
 */
byte libaroma_draw_rect(
  LIBAROMA_CANVASP dst,
  int x, int y, int w, int h,
  word color, byte alpha) {
  if (dst == NULL) {
    /* default target: the framebuffer canvas */
    dst = libaroma_fb()->canvas;
  }
  /* clamp the top-left corner into the canvas */
  if (x < 0) {
    x = 0;
  }
  if (y < 0) {
    y = 0;
  }
  /* check for valid x/y */
  if (x > dst->w || y > dst->h){
    ALOGW("libaroma_draw_rect x/y (%d/%d) greater than dest size (%dx%d)", x, y, dst->w, dst->h);
    return 0;
  }
  /* fix position - clip the bottom-right corner */
  int x2 = x + w;
  int y2 = y + h;
  if (x2 > dst->w) {
    x2 = dst->w;
  }
  if (y2 > dst->h) {
    y2 = dst->h;
  }
  /* fixed size */
  w = x2 - x;
  h = y2 - y;
  /* draw */
  int dy;
  if (alpha == 0xff) {
    /* opaque fill */
    wordp datapos = dst->data + x;
#ifdef libaroma_memset16
    /* fast 16-bit memset available: fill each line directly */
    for (dy = y; dy < y2; dy++) {
      wordp linepos = datapos + (dy * dst->l);
      libaroma_color_set(linepos,color,w);
    }
#else
    /* fill the first line, then replicate it with memcpy */
    int w2=w*2;
    wordp firstline = datapos + (y * dst->l);
    libaroma_color_set(firstline, color, w);
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
    for (dy = y+1; dy < y2; dy++) {
      wordp linepos = datapos + (dy * dst->l);
      memcpy(linepos,firstline,w2);
    }
#endif
  }
  else {
    /* translucent fill: blend line by line */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
    for (dy = y; dy < y2; dy++) {
      wordp linepos = dst->data + (dy * dst->l) + x;
#ifdef __engine_have_libaroma_alpha_rgba_fill
      libaroma_alpha_rgba_fill_line(dy, w, linepos, linepos, color, alpha);
#else
      libaroma_alpha_rgba_fill(w, linepos, linepos, color, alpha);
#endif
    }
  }
  return 1;
} /* End of libaroma_draw_rect */
/*
 * Function    : libaroma_draw_rectangle
 * Return Value: byte
 * Descriptions: draw non-filled (outlined) rectangle with optional rounded
 *               corners; falls back to a filled rectangle when the border
 *               thickness would overlap itself.
 */
byte libaroma_draw_rectangle(
  LIBAROMA_CANVASP dest,
  int x, int y, int w, int h,
  int thickness, int roundsize,
  word color, byte alpha, byte aliased
){
  if (dest==NULL) dest=libaroma_fb()->canvas;
  if (w<1 || h<1) return 0;
  /* corner radii, limited to half of the smaller side */
  int cornerw=0;
  int cornerh=0;
  if (roundsize){
    cornerw=cornerh=roundsize;
    int maxsz=(MIN(w, h))/2;
    if (cornerw>maxsz) cornerw=maxsz;
    if (cornerh>maxsz) cornerh=maxsz;
  }
  /* if the border is too thick for the rect, the outline degenerates */
  byte sizeOverflows=0;
  if (x+thickness > ((x+w)-(thickness*2))){
    sizeOverflows = 1;
  }
  if (y+thickness > ((y+h)-(thickness*2))){
    sizeOverflows = 1;
  }
  if (sizeOverflows){ //just draw a filled rectangle
    libaroma_draw_rect(dest, x, y, w, h, color, alpha);
    return 1;
  }
  if (w > (cornerw*2) && h > (cornerh*2)){ /* draw lines */
    /* top */
    libaroma_draw_rect(dest, x+cornerw, y, w-(cornerw*2), thickness, color, alpha);
    /* left */
    libaroma_draw_rect(dest, x, y+cornerh, thickness, h-(cornerh*2), color, alpha);
    /* right */
    libaroma_draw_rect(dest, (x+w)-thickness, y+cornerh, thickness, h-(cornerh*2), color, alpha);
    /* bottom */
    libaroma_draw_rect(dest, x+cornerw, (y+h)-thickness, w-(cornerw*2), thickness, color, alpha);
  }
  if (cornerw && cornerh){ /* draw corners */
    /* NOTE(review): the top corner arc centers use (cornerw+1, cornerh+1)
     * without adding the x/y offsets, unlike the bottom corners - looks
     * like this assumes x == y == 0; confirm against upstream libaroma. */
    /* top, left */
    libaroma_draw_arc(dest, cornerw+1, cornerh+1, cornerw+1, cornerh+1, thickness, 180.0, 270.0, color, alpha, 0, aliased?0.5:0.0);
    /* top, right */
    libaroma_draw_arc(dest, (x+w)-cornerw, cornerh+1, cornerw, cornerh+1, thickness, 270.0, 360.0, color, alpha, 0, aliased?0.5:0.0);
    /* no antialiasing makes height more precise in bottom corners */
    if (!aliased) cornerh+=1;
    /* bottom, left */
    libaroma_draw_arc(dest, cornerw+1, (y+h)-cornerh-1, cornerw+1, cornerh+1, thickness, 90.0, 180.0, color, alpha, 0, aliased?0.5:0.0);
    /* bottom, right */
    libaroma_draw_arc(dest, (x+w)-cornerw, (y+h)-cornerh-1, cornerw, cornerh+1, thickness, 0.0, 90.0, color, alpha, 0, aliased?0.5:0.0);
  }
  return 1;
} /* End of libaroma_draw_rectangle */
/*
 * Function    : libaroma_draw_skewed_rect
 * Return Value: byte (1 = drawn, 0 = path allocation/draw failure)
 * Descriptions: draw an arbitrary quadrilateral given its four corners,
 *               filled with color (or textured from src via the path engine).
 */
byte libaroma_draw_skewed_rect(
  LIBAROMA_CANVASP dest,
  LIBAROMA_CANVASP src,
  int x0, int y0, int x1, int y1,
  int x2, int y2, int x3, int y3,
  word color
){
  if (!dest) dest=libaroma_fb()->canvas;
  /* build a closed 4-vertex path starting at (x0,y0) */
  LIBAROMA_PATHP path=libaroma_path(x0, y0);
  if (path==NULL){
    ALOGW("libaroma_draw_skewed_rect failed to alloc path");
    return 0;
  }
  int ret=0;
  if (libaroma_path_add(path, x1, y1) &&
      libaroma_path_add(path, x2, y2) &&
      libaroma_path_add(path, x3, y3)){
    if (libaroma_path_draw_filled(dest, src, path, color, 0xFF, 0, 1)){
      ret=1;
    }
    else{
      ALOGW("libaroma_draw_skewed_rect failed to draw path");
    }
  }
  libaroma_path_free(path);
  return ret;
} /* End of libaroma_draw_skewed_rect */
/*
 * Function    : libaroma_draw_pixel
 * Return Value: byte (1 = drawn, 0 = outside the canvas)
 * Descriptions: set or alpha-blend a single pixel.
 */
byte libaroma_draw_pixel(
  LIBAROMA_CANVASP dest,
  int dx, int dy,
  word color,
  byte alpha
){
  if (!dest){
    dest=libaroma_fb()->canvas;
  }
  /* reject out-of-canvas coordinates */
  if ((dx<0)||(dy<0)||(dx>=dest->w)||(dy>=dest->h)){
    return 0;
  }
  wordp px = dest->data + (dest->l * dy) + dx;
  if (alpha==0xff){
    /* fully opaque: plain overwrite */
    *px = color;
  }
  else if (alpha>0){
    /* blend with the existing pixel; alpha==0 leaves it untouched */
    *px = libaroma_alpha(*px, color, alpha);
  }
  return 1;
} /* End of libaroma_draw_pixel */
/*
 * Function    : libaroma_draw_alphapixel
 * Return Value: byte (1 = set, 0 = outside canvas or no alpha channel)
 * Descriptions: write a single value into the canvas alpha channel.
 */
byte libaroma_draw_alphapixel(
  LIBAROMA_CANVASP dest,
  int dx, int dy,
  byte alpha
){
  if (!dest){
    dest=libaroma_fb()->canvas;
  }
  /* reject out-of-canvas coordinates */
  if ((dx<0)||(dy<0)||(dx>=dest->w)||(dy>=dest->h)){
    return 0;
  }
  /* canvas without alpha channel cannot store the value */
  if (dest->alpha==NULL){
    return 0;
  }
  dest->alpha[(dest->l * dy) + dx] = alpha;
  return 1;
} /* End of libaroma_draw_alphapixel */
/*
 * Function    : libaroma_draw_copypixel
 * Return Value: byte (1 = copied, 0 = invalid canvas/coordinates)
 * Descriptions: copy pixel color from src to dest.
 *               NOTE(review): this copies THREE consecutive 16-bit words
 *               (dx, dx+1, dx+2), i.e. three adjacent pixels, while every
 *               other routine here treats one word as one pixel - and the
 *               +1/+2 accesses are not bounds-checked at the row end.
 *               Looks like a copy-paste from a byte-per-channel layout;
 *               confirm against upstream libaroma before relying on it.
 */
byte libaroma_draw_copypixel(
  LIBAROMA_CANVASP dest, LIBAROMA_CANVASP src,
  int dx, int dy, int sx, int sy
){
  if (!dest || !src) return 0;
  /* both source and destination coordinates must be inside their canvas */
  if ((dx<0)||(dy<0)||(sx<0)||(sy<0)||
    (dy>=dest->h)||(dx>=dest->w)||(sy>=src->h)||(sx>=src->w)
  ){
    return 0;
  }
  dest->data[(dest->l*dy)+dx] = src->data[(src->l*sy)+sx];
  dest->data[(dest->l*dy)+dx+1] = src->data[(src->l*sy)+sx+1];
  dest->data[(dest->l*dy)+dx+2] = src->data[(src->l*sy)+sx+2];
  return 1;
} /* End of libaroma_draw_copypixel */
/*
 * Function    : libaroma_draw_copyalphapixel
 * Return Value: byte (1 = copied, 0 = invalid canvas/coords or OOM)
 * Descriptions: copy one alpha-channel value from src to dest, lazily
 *               allocating the destination alpha buffer on first use.
 */
byte libaroma_draw_copyalphapixel(
  LIBAROMA_CANVASP dest, LIBAROMA_CANVASP src,
  int dx, int dy, int sx, int sy
){
  if (!dest || !src) return 0;
  /* nothing to copy without a source alpha channel */
  if (!src->alpha) return 0;
  if (!dest->alpha){
    /* initialize alpha for target canvas (zero-filled, one byte/pixel) */
    dest->alpha = calloc(dest->s, 1);
    /* FIX: the original ignored allocation failure and dereferenced the
     * NULL buffer below */
    if (!dest->alpha) return 0;
  }
  /* both source and destination coordinates must be inside their canvas */
  if ((dx<0)||(dy<0)||(sx<0)||(sy<0)||
    (dy>=dest->h)||(dx>=dest->w)||(sy>=src->h)||(sx>=src->w)
  ){
    return 0;
  }
  dest->alpha[(dest->l*dy)+dx] = src->alpha[(src->l*sy)+sx];
  return 1;
} /* End of libaroma_draw_copyalphapixel */
/*
 * Function    : libaroma_draw_line
 * Return Value: byte
 * Descriptions: draw an anti-aliased line of width wd.
 *               is_mask==0: blend `color` into the pixel data;
 *               is_mask==1: write inverted coverage into the alpha channel;
 *               is_mask==2: write coverage into the alpha channel.
 *               NOTE(review): the error-accumulation scheme resembles
 *               A. Zingl's anti-aliased thick-line rasterizer - the loop
 *               invariants are order-sensitive, so the body is left as-is.
 */
byte libaroma_draw_line(
  LIBAROMA_CANVASP dest,
  int x0, int y0, int x1, int y1,
  float wd,
  word color,
  byte alpha,
  byte is_mask){
  /* plot one pixel with coverage `a`; bail out of the enclosing loop if the
   * pixel write reports failure (out of canvas / missing alpha channel) */
#define __DRAW_PIX(x,y,a) \
  if (is_mask==1){ \
    if (!libaroma_draw_alphapixel( \
      dest, x, y, \
      MIN(alpha,MAX(0, alpha * (1-(a)))) \
    )) { break; } \
  } \
  else if (is_mask==2){ \
    if (!libaroma_draw_alphapixel( \
      dest, x, y, \
      MIN(0xff,MAX(0, 255 * (a))) \
    )) { break; } \
  } \
  else{ \
    if (!libaroma_draw_pixel( \
      dest, x, y, color, \
      MIN(0xff,MAX(0, alpha * (1-(a)))) \
    )) { break; } \
  }
  if (!dest){
    dest=libaroma_fb()->canvas;
  }
  /* Bresenham state: deltas, step directions, error term, line length */
  int dx = abs(x1-x0), sx = x0 < x1 ? 1 : -1;
  int dy = abs(y1-y0), sy = y0 < y1 ? 1 : -1;
  int err = dx-dy, e2, x2, y2;
  float ed = dx+dy == 0 ? 1 : sqrt((float)dx*dx+(float)dy*dy);
  for (wd = (wd+1)/2; ; ) {
    if ((x0>=0)&&(y0>=0)){
      __DRAW_PIX(x0,y0,
        abs(err-dx+dy)/ed-wd+1
      );
    }
    e2 = err; x2 = x0;
    if (2*e2 >= -dx) {
      /* x step: fill perpendicular pixels above/below the line */
      for (e2 += dy, y2 = y0; e2 < ed*wd && (y1 != y2 || dx > dy); e2 += dx){
        if ((x0>=0)&&(y2>=0)){
          __DRAW_PIX(x0, y2+=sy,
            abs(e2)/ed-wd+1
          );
        }
      }
      if (x0==x1){
        break;
      }
      e2 = err; err -= dy; x0 += sx;
    }
    if (2*e2 <= dy){
      /* y step: fill perpendicular pixels left/right of the line */
      for (e2 = dx-e2; e2 < ed*wd && (x1 != x2 || dx < dy); e2 += dy){
        if ((x2>=0)&&(y0>=0)){
          __DRAW_PIX(x2 += sx, y0,
            abs(e2)/ed-wd+1
          );
        }
      }
      if (y0==y1){
        break;
      }
      err += dx; y0 += sy;
    }
  }
#undef __DRAW_PIX
  return 1;
} /* End of libaroma_draw_line */
/*
 * Function    : libaroma_draw_subpixel
 * Return Value: byte (1 = drawn, 0 = outside the canvas)
 * Descriptions: draw an anti-aliased point at a fractional position with
 *               brush thickness `tickness`; coverage falls off linearly
 *               with the manhattan distance from the point center.
 */
byte libaroma_draw_subpixel(
  LIBAROMA_CANVASP dest,
  float dx, float dy, float tickness,
  word color,
  byte alpha){
  if (!dest){
    dest=libaroma_fb()->canvas;
  }
  if ((dx<=-1)||(dy<=-1)||(dy>=dest->h)||(dx>=dest->w)){
    return 0;
  }
  int x, y;
  float px, py;
  /* half thickness of the brush.
   * NOTE(review): tickness <= 1 gives ht <= 0 and an infinite ratio below,
   * which MAX() clamps to zero coverage - same as the original behavior. */
  float ht=(tickness-1.0f)/2;
  for (y=floor(dy-ht);y<=ceil(dy+ht);y++){
    if ((y>=0)&&(y<dest->h)){
      int pos = y * dest->l;
      for (x=floor(dx-ht);x<=ceil(dx+ht);x++){
        if ((x>=0)&&(x<dest->w)){
          /* FIX: the original computed abs((dx<x)?dx-x:x-dx) - the ternary
           * picks the NEGATIVE difference and the integer abs() truncates
           * the float to a whole pixel, destroying the sub-pixel falloff.
           * Use the float absolute distance instead. */
          px = fabsf(dx - x) / ht;
          py = fabsf(dy - y) / ht;
          /* linear coverage, clamped to [0, 0xff] */
          int alp = MIN(0xff,MAX((1-(px+py)) * 0xff,0));
          wordp d = dest->data + pos + x;
          word cl = libaroma_alpha(*d, color, alp);
          if (alpha!=0xff){
            /* apply the global alpha on top of the coverage */
            cl=libaroma_alpha(*d,cl,alpha);
          }
          *d=cl;
        }
      }
    }
  }
  return 1;
} /* End of libaroma_draw_subpixel */
/*
 * Function    : libaroma_draw_mask_circle
 * Return Value: byte
 * Descriptions: copy/blend a circular region of diameter sz from src
 *               (centered at sx,sy) onto dst (centered at dx,dy),
 *               clipping each scanline chord to both canvases.
 */
byte libaroma_draw_mask_circle(
  LIBAROMA_CANVASP dst,
  LIBAROMA_CANVASP src,
  int dx, int dy,
  int sx, int sy,
  int sz,
  byte alpha){
  if (dst == NULL) {
    /* default target: the framebuffer canvas */
    dst = libaroma_fb()->canvas;
  }
  if (src == NULL) {
    return 0;
  }
  if (sz<2){
    /* degenerate circle: nothing visible to draw */
    return 1;
  }
  int radius = sz/2;
  int rad = radius * radius;
  int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for(y=-radius; y<=radius; y++){
    int pdy = dy + y;
    int psy = sy + y;
    /* scanline must be valid in both canvases */
    if ((pdy<dst->h)&&(pdy>=0)&&(psy<src->h)&&(psy>=0)){
      int pos_d = pdy * dst->l;
      int pos_s = psy * src->l;
      /* half chord width of the circle at this scanline */
      int x = sqrt(rad-y*y);
      int w = x*2;
      /* clip the chord against the left edges of src and dst */
      if (sx-x<0){
        w-=abs(sx-x);
        x=sx;
      }
      if (dx-x<0){
        w-=abs(dx-x);
        x=dx;
      }
      int pdx = dx-x;
      int sdx = sx-x;
      /* clip the chord against the right edges of src and dst */
      if (sdx+w>src->w){
        w=src->w-sdx;
      }
      if (pdx+w>dst->w){
        w=dst->w-pdx;
      }
      if (w>0){
        wordp dd = dst->data + pos_d + pdx;
        wordp sd = src->data + pos_s + sdx;
        if (alpha==0xff){
          /* opaque: straight row copy (16-bit pixels) */
          memcpy(dd,sd,w*2);
        }
        else{
          //libaroma_alpha_const_line(pdy,w,dd,dd,sd,alpha);
          libaroma_alpha_const(w,dd,dd,sd,alpha);
        }
      }
    }
  }
  return 1;
} /* End of libaroma_draw_mask_circle */
/*
 * Function    : libaroma_draw_circle
 * Return Value: byte
 * Descriptions: draw a filled circle of diameter sz centered at (dx,dy),
 *               scanline by scanline, clipped to the canvas.
 */
byte libaroma_draw_circle(
  LIBAROMA_CANVASP dst,
  word color,
  int dx, int dy,
  int sz,
  byte alpha){
  if (dst == NULL) {
    /* default target: the framebuffer canvas */
    dst = libaroma_fb()->canvas;
  }
  if (sz<2){
    /* degenerate circle: nothing visible to draw */
    return 1;
  }
  int radius = sz/2;
  int rad = radius * radius;
  int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for(y=-radius; y<=radius; y++){
    int pdy = dy + y;
    if ((pdy<dst->h)&&(pdy>=0)){
      int pos_d = pdy * dst->l;
      /* half chord width of the circle at this scanline */
      int x = sqrt(rad-y*y);
      int w = x*2;
      /* clip the chord against the left and right canvas edges */
      if (dx-x<0){
        w-=abs(dx-x);
        x=dx;
      }
      int pdx = dx-x;
      if (pdx+w>dst->w){
        w=dst->w-pdx;
      }
      if (w>0){
        wordp dd = dst->data + pos_d + pdx;
        if (alpha==0xff){
          /* opaque fill of the chord */
          libaroma_color_set(dd,color,w);
        }
        else{
#ifdef __engine_have_libaroma_alpha_rgba_fill
          libaroma_alpha_rgba_fill_line(pdy,w,dd, dd,color,alpha);
#else
          libaroma_alpha_rgba_fill(w,dd, dd,color,alpha);
#endif
        }
      }
    }
  }
  return 1;
} /* End of libaroma_draw_circle */
/*
 * Function    : libaroma_draw_alpha_circle
 * Return Value: byte (1 = drawn/degenerate, 0 = no alpha channel)
 * Descriptions: fill a circle of diameter sz centered at (dx,dy) in the
 *               canvas alpha channel with a constant alpha value.
 */
byte libaroma_draw_alpha_circle(
  LIBAROMA_CANVASP dst,
  int dx, int dy,
  int sz,
  byte alpha){
  /* FIX: default to the framebuffer canvas and reject canvases without an
   * alpha channel - the original dereferenced dst/dst->alpha unchecked,
   * unlike the sibling drawing routines in this file */
  if (dst == NULL) {
    dst = libaroma_fb()->canvas;
  }
  if (dst->alpha == NULL) {
    return 0;
  }
  if (sz<2){
    /* degenerate circle: nothing visible to draw */
    return 1;
  }
  int radius = sz/2;
  int rad = radius * radius;
  int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for(y=-radius; y<=radius; y++){
    int pdy = dy + y;
    if ((pdy<dst->h)&&(pdy>=0)){
      int pos_d = pdy * dst->l;
      /* half chord width of the circle at this scanline */
      int x = sqrt(rad-y*y);
      int w = x*2;
      /* clip the chord against the left and right canvas edges */
      if (dx-x<0){
        w-=abs(dx-x);
        x=dx;
      }
      int pdx = dx-x;
      if (pdx+w>dst->w){
        w=dst->w-pdx;
      }
      if (w>0){
        int curx =pos_d + pdx;
        int j;
        for (j=0; j<w; j++){
          dst->alpha[curx+j]=alpha;
        }
      }
    }
  }
  return 1;
} /* End of libaroma_draw_alpha_circle */
/*
 * Function    : libaroma_draw_line_width
 * Return Value: byte
 * Descriptions: draw a line of width wd by filling the quadrilateral whose
 *               corners are offset wd/2 perpendicular to the segment.
 */
byte libaroma_draw_line_width(
  LIBAROMA_CANVASP dest,
  float x1, float y1, float x2, float y2,
  float wd,
  word color,
  byte alpha,
  byte is_mask,
  float aliasing){
  if (!dest){
    dest=libaroma_fb()->canvas;
  }
  /* mask drawing needs an alpha channel on the target */
  if ((is_mask)&&(dest->alpha==NULL)){
    return 0;
  }
  if ((!is_mask)&&(alpha<1)){
    return 1;
  }
  /* perpendicular half-width offsets (the original computed the identical
   * t2sina1/t2sina2 and t2cosa1/t2cosa2 pairs twice - collapsed here) */
  float angle  = atan2(y2 - y1, x2 - x1);
  float t2sina = wd / 2 * sin(angle);
  float t2cosa = wd / 2 * cos(angle);
  /* closed quad around the segment */
  LIBAROMA_PATHP path=libaroma_path(x1 + t2sina, y1 - t2cosa);
  /* FIX: the original passed an unchecked allocation into the path calls */
  if (path==NULL){
    ALOGW("libaroma_draw_line_width failed to alloc path");
    return 0;
  }
  libaroma_path_add(path, x2 + t2sina, y2 - t2cosa);
  /* FIX: the original added this corner twice (copy-paste duplicate);
   * a repeated consecutive vertex is a degenerate edge with no area */
  libaroma_path_add(path, x2 - t2sina, y2 + t2cosa);
  libaroma_path_add(path, x1 - t2sina, y1 + t2cosa);
  libaroma_path_add(path, x1 + t2sina, y1 - t2cosa);
  byte res=libaroma_path_draw(
    dest,
    path,
    color,
    alpha,
    is_mask,
    aliasing);
  libaroma_path_free(path);
  return res;
} /* End of libaroma_draw_line_width */
/*
 * Function    : _libaroma_draw_arc_findpoint
 * Return Value: byte (0 = empty interval, 1 = points appended)
 * Descriptions: recursively subdivide the angular interval [start,end] of
 *               an ellipse centered at (dx,dy), appending path points until
 *               consecutive points are less than 2 pixels apart.
 *               (xt0,yt0)/(xt1,yt1) are the already-known interval endpoints.
 */
byte _libaroma_draw_arc_findpoint(
  LIBAROMA_PATHP path,
  float dx, float dy,
  float radius_w, float radius_h,
  float xt0, float yt0,
  float xt1, float yt1,
  double start, double end
){
  double radian;
  if (start==end){
    return 0;
  }
  else if (start<end){
    /* midpoint of the angular interval */
    radian = start + ((end - start) / 2.0);
  }
  else{
    radian = end + ((start - end) / 2.0);
  }
  /* point on the ellipse at the midpoint angle */
  float xt = dx + radius_w*cos(radian);
  float yt = dy + radius_h*sin(radian);
  /* FIX: use fabsf for float distances; the original applied the integer
   * abs() through an implicit float->int conversion, which happens to give
   * the same >=2 verdict but is fragile and trips -Wabsolute-value */
  if ((fabsf(xt-xt0)>=2)||(fabsf(yt-yt0)>=2)) {
    /* first half still too coarse: recurse into [start, radian] */
    _libaroma_draw_arc_findpoint(
      path, dx, dy, radius_w, radius_h,
      xt0, yt0, xt, yt,
      start, radian
    );
  }
  libaroma_path_add(path, xt, yt);
  if ((fabsf(xt-xt1)>=2)||(fabsf(yt-yt1)>=2)) {
    /* second half still too coarse: recurse into [radian, end] */
    _libaroma_draw_arc_findpoint(
      path, dx, dy, radius_w, radius_h,
      xt, yt, xt1, yt1,
      radian, end
    );
  }
  libaroma_path_add(path, xt1, yt1);
  return 1;
} /* End of _libaroma_draw_arc_findpoint */
/*
 * Function    : libaroma_draw_arc
 * Return Value: byte
 * Descriptions: draw an (elliptical) arc between start_angle and end_angle
 *               (degrees) into the canvas; when 0 < width < radius/2 the
 *               arc is drawn as a ring segment of that width, otherwise the
 *               pie slice between the endpoints is filled.
 */
byte libaroma_draw_arc(
  LIBAROMA_CANVASP dest,
  float dx, float dy,
  float radius_w, float radius_h,
  float width,
  float start_angle, float end_angle,
  word color,byte alpha,byte is_mask,float aliasing
){
  if (!dest){
    dest=libaroma_fb()->canvas;
  }
  /* mask drawing needs an alpha channel on the target */
  if ((is_mask)&&(dest->alpha==NULL)){
    return 0;
  }
  if ((!is_mask)&&(alpha<1)){
    return 1;
  }
  if (start_angle==end_angle){
    /* no draw needed */
    return 1;
  }
  /*
  start_angle=fmod(start_angle,360);
  end_angle=fmod(end_angle,360);
  */
  /*
  start_angle=360-start_angle;
  end_angle=360-end_angle;
  */
  /* normalize so start <= end */
  if (start_angle>end_angle){
    float tmp=start_angle;
    start_angle=end_angle;
    end_angle=tmp;
  }
  double start_radian = start_angle* __PI / 180.0;
  double end_radian = end_angle * __PI / 180.0;
  /* arc endpoints on the outer ellipse */
  float start_x = dx + radius_w*cos(start_radian);
  float start_y = dy + radius_h*sin(start_radian);
  float end_x = dx + radius_w*cos(end_radian);
  float end_y = dy + radius_h*sin(end_radian);
  /* build the outer arc polyline by recursive subdivision */
  LIBAROMA_PATHP path=libaroma_path(start_x, start_y);
  _libaroma_draw_arc_findpoint(
    path, dx, dy, radius_w, radius_h,
    start_x, start_y, end_x, end_y,
    start_radian, end_radian
  );
  libaroma_path_add(path, end_x, end_y);
  if ((width>0)&&(width<radius_w/2)&&(width<radius_h/2)) {
    /* ring segment: walk back along the inner ellipse (radius - width) */
    radius_w -= width;
    radius_h -= width;
    /* roll */
    start_x = dx + radius_w*cos(end_radian);
    start_y = dy + radius_h*sin(end_radian);
    end_x = dx + radius_w*cos(start_radian);
    end_y = dy + radius_h*sin(start_radian);
    libaroma_path_add(path, start_x, start_y);
    _libaroma_draw_arc_findpoint(
      path, dx, dy, radius_w, radius_h,
      start_x, start_y, end_x, end_y,
      end_radian, start_radian
    );
  }
  byte res=libaroma_path_draw(
    dest,
    path,
    color,
    alpha,
    is_mask,
    aliasing);
  libaroma_path_free(path);
  return res;
} /* End of libaroma_draw_arc */
#ifdef __cplusplus
}
#endif
#endif /* __libaroma_commondraw_c__ */
|
GB_binop__rdiv_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_int64)
// A.*B function (eWiseMult): GB (_AemultB__rdiv_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__rdiv_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int64)
// A*D function (colscale): GB (_AxD__rdiv_int64)
// D*A function (rowscale): GB (_DxB__rdiv_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int64)
// C=scalar+B GB (_bind1st__rdiv_int64)
// C=scalar+B' GB (_bind1st_tran__rdiv_int64)
// C=A+scalar GB (_bind2nd__rdiv_int64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 64)
#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: rdiv is "reverse divide", so the operands are swapped
// (z = y/x) using GraphBLAS's safe signed integer division
#define GB_BINOP(z, x, y, i, j) \
    z = GB_IDIV_SIGNED (y, x, 64) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RDIV || GxB_NO_INT64 || GxB_NO_RDIV_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

// Generated worker: accumulate the dense sum A+B into dense C with the
// rdiv(int64) operator; the actual loop lives in the included template.
void GB (_Cdense_ewise3_accum__rdiv_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Generated worker: compute dense C = A+B with the rdiv(int64) operator.
// Returns GrB_NO_VALUE when this operator is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Generated worker: accumulate sparse B into dense C (subassign method 23),
// using the precomputed B_ek_slicing task decomposition.
GrB_Info GB (_Cdense_accumB__rdiv_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: C += b, accumulating a scalar b into a dense matrix C.
GrB_Info GB (_Cdense_accumb__rdiv_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns.  Kept as
// emitted by the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: C = A*D, scaling the columns of A by the diagonal
// matrix D.  The result values are written through Cx (C->x).
GrB_Info GB (_AxD__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: C = D*B, scaling the rows of B by the diagonal
// matrix D.  The result values are written through Cx (C->x).
GrB_Info GB (_DxB__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Generated kernel: eWiseAdd, C = A+B or C<M> = A+B.  The *_ek_slicing
// workspaces are declared here and released by GB_FREE_WORK after the
// template runs.
GrB_Info GB (_AaddB__rdiv_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult, C = A.*B or C<M> = A.*B (method 01).
GrB_Info GB (_AemultB_01__rdiv_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is defined as 0 above for rdiv, so only the non-flipped
// branch below is compiled in this file.
GrB_Info GB (_AemultB_02__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult C<M> = A.*B, with M sparse/hyper and both
// A and B bitmap/full.
GrB_Info GB (_AemultB_03__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult where the output C is held as a bitmap;
// handles C=A.*B, C<M>=A.*B, and C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__rdiv_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the int64 RDIV operator with the scalar bound as its first ('x')
// argument: for every present entry p, Cx [p] = GB_IDIV_SIGNED (Bx [p], x).
// Bb is B's bitmap (entry p is skipped when GBB reports it absent).
GrB_Info GB (_bind1st__rdiv_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t xscalar = (*((int64_t *) x_input)) ;
int64_t *Cz = (int64_t *) Cx_output ;
int64_t *Bz = (int64_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip entries not present in the bitmap of B
if (!GBB (Bb, k)) continue ;
int64_t bval = Bz [k] ;
Cz [k] = GB_IDIV_SIGNED (bval, xscalar, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the int64 RDIV operator with the scalar bound as its second ('y')
// argument: for every present entry p, Cx [p] = GB_IDIV_SIGNED (y, Ax [p]).
// Ab is A's bitmap (entry p is skipped when GBB reports it absent).
GrB_Info GB (_bind2nd__rdiv_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t yscalar = (*((int64_t *) y_input)) ;
int64_t *Cz = (int64_t *) Cx_output ;
int64_t *Az = (int64_t *) Ax_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip entries not present in the bitmap of A
if (!GBB (Ab, k)) continue ;
int64_t aval = Az [k] ;
Cz [k] = GB_IDIV_SIGNED (yscalar, aval, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (aij, x, 64) ; \
}
// Generated kernel: C = op (x, A'), transposing A while applying the
// operator with scalar x bound first (uses the GB_CAST_OP macro above).
GrB_Info GB (_bind1st_tran__rdiv_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): the redefinition below repeats the one above; it is a
// code-generator artifact and has no effect here.
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (y, aij, 64) ; \
}
// Generated kernel: C = op (A', y), transposing A while applying the
// operator with scalar y bound second (uses the GB_CAST_OP macro above).
GrB_Info GB (_bind2nd_tran__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
orphaned-directives.c | #include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
static double a[1000];
// Fill a[0..999] with i/2 using an ORPHANED worksharing construct: the
// `#pragma omp for` below has no lexically enclosing parallel region, so it
// binds to the parallel region of whoever calls init() (main() here).
// Called outside any parallel region, the single thread runs the whole loop.
static void init(void)
{
int i=0;
i=i+5; // NOTE(review): this value is never used -- the loop resets i to 0
#pragma omp for
for (i=0;i<1000;i++)
{
a[i]=(double)i/2.0;
}
}
int main(void){
// Open a parallel region; each thread calls init(), whose orphaned
// `#pragma omp for` then splits the initialization loop among the team.
#pragma omp parallel
{
init();
}
return 0;
}
|
arrays.c | /**
* module with tools for manipulating arrays
* Julien Lesgourgues, 18.04.2010
*/
#include "arrays.h"
/**
* Called by thermodynamics_init(); perturb_sources().
*/
/**
 * Fill column index_dydx of a row-major table with dy/dx, where x and y are
 * read from columns index_x and index_y.  Interior points use a weighted
 * two-sided finite difference (weights dx2^2 and dx1^2); the two endpoints
 * are filled by linear extrapolation from the adjacent interior value.
 * Returns _SUCCESS_ or fails through class_test on a zero x-step or if the
 * output column coincides with an input column.
 */
int array_derive(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_dydx,
ErrorMsg errmsg) {
int i;
double dx1,dx2,dy1,dy2,weight1,weight2;
class_test((index_dydx == index_x) || (index_dydx == index_y),
errmsg,
"output column %d must differ from input columns %d and %d",index_dydx,index_x,index_y);
/* seed the forward differences for the first interior point */
dx2=array[1*n_columns+index_x]-array[0*n_columns+index_x];
dy2=array[1*n_columns+index_y]-array[0*n_columns+index_y];
for (i=1; i<n_lines-1; i++) {
/* shift: previous forward difference becomes the backward one */
dx1 = dx2;
dy1 = dy2;
dx2 = array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x];
dy2 = array[(i+1)*n_columns+index_y]-array[i*n_columns+index_y];
class_test((dx1 == 0) || (dx2 == 0),
errmsg,
"stop to avoid division by zero");
weight1 = dx2*dx2;
weight2 = dx1*dx1;
array[i*n_columns+index_dydx] = (weight1*dy1+weight2*dy2) / (weight1*dx1+weight2*dx2);
/* endpoints: linear extrapolation from the first/last interior value */
if (i == 1)
array[(i-1)*n_columns+index_dydx] = 2.*dy1/dx1 - array[i*n_columns+index_dydx];
if (i == n_lines-2)
array[(i+1)*n_columns+index_dydx] = 2.*dy2/dx2 - array[i*n_columns+index_dydx];
}
return _SUCCESS_;
}
/**
 * Fill column index_dydx of `array` with dy/dx computed from the cubic
 * spline representation: column index_y of `array` holds y, the matching
 * column of `array_splined` holds the spline second derivatives y'', and
 * x comes from the separate vector x_array.  The derivative is evaluated
 * at the left node of each interval; the last line uses the last interval
 * evaluated at its right node.
 */
int array_derive_spline(
double * x_array,
int n_lines,
double * array,
double * array_splined,
int n_columns,
int index_y,
int index_dydx,
ErrorMsg errmsg) {
int i;
double h;
class_test(index_dydx == index_y,
errmsg,
"Output column %d must differ from input columns %d",
index_dydx,
index_y);
class_test(n_lines<2,
errmsg,
"no possible derivation with less than two lines");
for (i=0; i<n_lines-1; i++) {
h = x_array[i+1] - x_array[i];
if (h == 0) {
sprintf(errmsg,"%s(L:%d) h=0, stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
/* spline derivative at the left node of interval [i,i+1] */
array[i*n_columns+index_dydx] =
(array[(i+1)*n_columns+index_y] - array[i*n_columns+index_y])/h
- h / 6. * (array_splined[(i+1)*n_columns+index_y] + 2. * array_splined[i*n_columns+index_y]);
}
/* last line: derivative at the right node of the final interval */
h = x_array[n_lines-1] - x_array[n_lines-2];
array[(n_lines-1)*n_columns+index_dydx] =
(array[(n_lines-1)*n_columns+index_y] - array[(n_lines-2)*n_columns+index_y])/h
+ h / 6. * (2. * array_splined[(n_lines-1)*n_columns+index_y] + array_splined[(n_lines-2)*n_columns+index_y]);
return _SUCCESS_;
}
/**
 * Same computation as array_derive_spline(), except that the spline second
 * derivatives y'' are stored in column index_ddy of the SAME table `array`
 * rather than in a separate array.  Writes dy/dx into column index_dy.
 */
int array_derive_spline_table_line_to_line(
double * x_array,
int n_lines,
double * array,
int n_columns,
int index_y,
int index_ddy,
int index_dy,
ErrorMsg errmsg) {
int i;
double h;
class_test(index_ddy == index_y,
errmsg,
"Output column %d must differ from input columns %d",
index_ddy,
index_y);
class_test(index_ddy == index_dy,
errmsg,
"Output column %d must differ from input columns %d",
index_ddy,
index_dy);
class_test(n_lines<2,
errmsg,
"no possible derivation with less than two lines");
for (i=0; i<n_lines-1; i++) {
h = x_array[i+1] - x_array[i];
if (h == 0) {
sprintf(errmsg,"%s(L:%d) h=0, stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
/* spline derivative at the left node of interval [i,i+1] */
array[i*n_columns+index_dy] =
(array[(i+1)*n_columns+index_y] - array[i*n_columns+index_y])/h
- h / 6. * (array[(i+1)*n_columns+index_ddy] + 2. * array[i*n_columns+index_ddy]);
}
/* last line: derivative at the right node of the final interval */
h = x_array[n_lines-1] - x_array[n_lines-2];
array[(n_lines-1)*n_columns+index_dy] =
(array[(n_lines-1)*n_columns+index_y] - array[(n_lines-2)*n_columns+index_y])/h
+ h / 6. * (2. * array[(n_lines-1)*n_columns+index_ddy] + array[(n_lines-2)*n_columns+index_ddy]);
return _SUCCESS_;
}
/**
 * Second-order-accurate first derivative dy/dx, written into column
 * index_dy of `array` (y read from column index_y, x from x_array).
 * Interior points use a three-point non-uniform stencil; the endpoints
 * are extrapolated using the second-derivative estimate.
 *
 * Fix: the size guard previously accepted n_lines == 2, but the stencil
 * below immediately reads x_array[2] and row 2 of `array`, which is out
 * of bounds in that case.  At least 3 lines are required.
 */
int array_derive1_order2_table_line_to_line(
double * x_array,
int n_lines,
double * array,
int n_columns,
int index_y,
int index_dy,
ErrorMsg errmsg) {
int i=1;
double dxp,dxm,dyp,dym;
if (n_lines < 3) {
sprintf(errmsg,"%s(L:%d) routine called with n_lines=%d, should be at least 3",__func__,__LINE__,n_lines);
return _FAILURE_;
}
/* stencil around line 1; also used to extrapolate line 0 */
dxp = x_array[2] - x_array[1];
dxm = x_array[0] - x_array[1];
dyp = *(array+2*n_columns+index_y) - *(array+1*n_columns+index_y);
dym = *(array+0*n_columns+index_y) - *(array+1*n_columns+index_y);
if ((dxp*dxm*(dxm-dxp)) == 0.) {
sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
*(array+1*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp));
*(array+0*n_columns+index_dy) = *(array+1*n_columns+index_dy)
- (x_array[1] - x_array[0]) * 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm));
for (i=2; i<n_lines-1; i++) {
dxp = x_array[i+1] - x_array[i];
dxm = x_array[i-1] - x_array[i];
dyp = *(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y);
dym = *(array+(i-1)*n_columns+index_y) - *(array+i*n_columns+index_y);
if ((dxp*dxm*(dxm-dxp)) == 0.) {
sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
*(array+i*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp));
}
/* last line: extrapolate using the differences left over from the loop
   (or from the line-1 stencil when n_lines == 3) */
*(array+(n_lines-1)*n_columns+index_dy) = *(array+(n_lines-2)*n_columns+index_dy)
+ (x_array[n_lines-1] - x_array[n_lines-2]) * 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm));
return _SUCCESS_;
}
/**
 * Second-order-accurate first AND second derivatives, written into columns
 * index_dy and index_ddy of `array` (y read from column index_y, x from
 * x_array).  Interior points use a three-point non-uniform stencil; the
 * endpoints are linearly extrapolated from the adjacent interior values.
 *
 * Fix: the routine had no size guard.  With n_lines < 3 the interior loop
 * never runs, and the endpoint formulas then read rows 1 and n_lines-2 of
 * the dy/ddy columns that were never written (out of bounds for n_lines<2).
 */
int array_derive2_order2_table_line_to_line(
double * x_array,
int n_lines,
double * array,
int n_columns,
int index_y,
int index_dy,
int index_ddy,
ErrorMsg errmsg) {
int i;
double dxp,dxm,dyp,dym;
if (n_lines < 3) {
sprintf(errmsg,"%s(L:%d) routine called with n_lines=%d, should be at least 3",__func__,__LINE__,n_lines);
return _FAILURE_;
}
for (i=1; i<n_lines-1; i++) {
dxp = x_array[i+1] - x_array[i];
dxm = x_array[i-1] - x_array[i];
dyp = *(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y);
dym = *(array+(i-1)*n_columns+index_y) - *(array+i*n_columns+index_y);
if ((dxp*dxm*(dxm-dxp)) == 0.) {
sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
*(array+i*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp));
*(array+i*n_columns+index_ddy) = 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm));
}
/* endpoints: first-order extrapolation from the neighbouring interior row */
*(array+0*n_columns+index_dy) = *(array+1*n_columns+index_dy)
- (x_array[1] - x_array[0]) * *(array+1*n_columns+index_ddy);
*(array+0*n_columns+index_ddy) = *(array+1*n_columns+index_ddy);
*(array+(n_lines-1)*n_columns+index_dy) = *(array+(n_lines-2)*n_columns+index_dy)
+ (x_array[n_lines-1] - x_array[n_lines-2]) * *(array+(n_lines-2)*n_columns+index_ddy);
*(array+(n_lines-1)*n_columns+index_ddy) = *(array+(n_lines-2)*n_columns+index_ddy);
return _SUCCESS_;
}
/**
 * Cumulative integral of the cubic spline y(x): column index_y holds y,
 * column index_ddy its spline second derivatives, and x comes from
 * x_array.  Writes the running integral (zero at the first line) into
 * column index_inty, adding the exact spline integral of each interval.
 */
int array_integrate_spline_table_line_to_line(
double * x_array,
int n_lines,
double * array,
int n_columns,
int index_y,
int index_ddy,
int index_inty,
ErrorMsg errmsg) {
int line;
double step;
/* the integral starts at zero on the first line */
array[0*n_columns+index_inty] = 0.;
for (line=0; line < n_lines-1; line++) {
step = (x_array[line+1]-x_array[line]);
/* trapezoid term plus the cubic-spline correction for this interval */
array[(line+1)*n_columns+index_inty] = array[line*n_columns+index_inty] +
(array[line*n_columns+index_y]+array[(line+1)*n_columns+index_y])*step/2.+
(array[line*n_columns+index_ddy]+array[(line+1)*n_columns+index_ddy])*step*step*step/24.;
}
return _SUCCESS_;
}
/**
* Not called.
*/
/**
 * Fill columns index_dydx and index_ddydxdx of the table with first- and
 * second-derivative estimates of y (column index_y) with respect to x
 * (column index_x), using the same weighted two-sided differences as
 * array_derive().  Endpoints are extrapolated from the adjacent interior
 * values.  (Marked "not called" in this file.)
 *
 * Fixes:
 *  - the zero-step check used && instead of || : with dx1 == 0 and
 *    dx2 != 0 (or vice versa) the code went on to divide by zero (the
 *    i==1 endpoint divides by dx1 alone; compare array_derive, which
 *    rejects either step being zero);
 *  - the i == n_lines-2 branch stored the second derivative into column
 *    index_dydx, clobbering the first derivative just computed, and left
 *    the last line of index_ddydxdx unset.
 */
int array_derive_two(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_dydx,
int index_ddydxdx,
ErrorMsg errmsg) {
int i;
double dx1,dx2,dy1,dy2,weight1,weight2;
if ((index_dydx == index_x) || (index_dydx == index_y)) {
sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d and %d",__func__,__LINE__,index_dydx,index_x,index_y);
return _FAILURE_;
}
/* seed the forward differences for the first interior point */
dx2=*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x);
dy2=*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y);
for (i=1; i<n_lines-1; i++) {
dx1 = dx2;
dy1 = dy2;
dx2 = *(array+(i+1)*n_columns+index_x)-*(array+i*n_columns+index_x);
dy2 = *(array+(i+1)*n_columns+index_y)-*(array+i*n_columns+index_y);
weight1 = dx2*dx2;
weight2 = dx1*dx1;
/* either step being zero makes a divisor below vanish */
if ((dx1 == 0.) || (dx2 == 0.)) {
sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
*(array+i*n_columns+index_dydx) = (weight1*dy1+weight2*dy2) / (weight1*dx1+weight2*dx2);
*(array+i*n_columns+index_ddydxdx) = (dx2*dy1-dx1*dy2) / (weight1*dx1+weight2*dx2);
if (i == 1) {
*(array+(i-1)*n_columns+index_dydx) = 2.*dy1/dx1 - *(array+i*n_columns+index_dydx);
*(array+(i-1)*n_columns+index_ddydxdx) = *(array+i*n_columns+index_ddydxdx);
}
if (i == n_lines-2) {
*(array+(i+1)*n_columns+index_dydx) = 2.*dy2/dx2 - *(array+i*n_columns+index_dydx);
*(array+(i+1)*n_columns+index_ddydxdx) = *(array+i*n_columns+index_ddydxdx);
}
}
return _SUCCESS_;
}
/**
 * Compute the cubic-spline second derivatives of y(x) (x and y read from
 * columns index_x and index_y of the table) and store them in column
 * index_ddydx2, using the standard tridiagonal sweep.  Boundary condition
 * is either natural (y''=0, _SPLINE_NATURAL_) or an estimated first
 * derivative at both ends (_SPLINE_EST_DERIV_).
 *
 * Fix: the workspace `u` was leaked on the two "spline mode not
 * identified" error returns; it is now freed before returning.
 */
int array_spline(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_ddydx2,
short spline_mode,
ErrorMsg errmsg) {
int i,k;
double p,qn,sig,un;
double * u;
double dy_first;
double dy_last;
if (n_lines < 3) {
sprintf(errmsg,"%s(L:%d) n_lines=%d, while routine needs n_lines >= 3",__func__,__LINE__,n_lines);
return _FAILURE_;
}
u = malloc((n_lines-1) * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (spline_mode == _SPLINE_NATURAL_) {
*(array+0*n_columns+index_ddydx2) = u[0] = 0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
/* second-order estimate of y'(x_0) from the first three points */
dy_first =
((*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y))-
(*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+2*n_columns+index_y)-*(array+0*n_columns+index_y)))/
((*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+2*n_columns+index_x)-*(array+1*n_columns+index_x)));
*(array+0*n_columns+index_ddydx2) = -0.5;
u[0] =
(3./(*(array+1*n_columns+index_x) - *(array+0*n_columns+index_x)))*
((*(array+1*n_columns+index_y) - *(array+0*n_columns+index_y))/
(*(array+1*n_columns+index_x) - *(array+0*n_columns+index_x))
-dy_first);
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
free(u); /* fix: do not leak the workspace on this error path */
return _FAILURE_;
}
}
/* forward elimination of the tridiagonal system */
for (i=1; i < n_lines-1; i++) {
sig = (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x))
/ (*(array+(i+1)*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));
p = sig * *(array+(i-1)*n_columns+index_ddydx2) + 2.0;
*(array+i*n_columns+index_ddydx2) = (sig-1.0)/p;
u[i] = (*(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y))
/ (*(array+(i+1)*n_columns+index_x) - *(array+i*n_columns+index_x))
- (*(array+i*n_columns+index_y) - *(array+(i-1)*n_columns+index_y))
/ (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));
u[i]= (6.0 * u[i] /
(*(array+(i+1)*n_columns+index_x) - *(array+(i-1)*n_columns+index_x))
- sig * u[i-1]) / p;
}
if (spline_mode == _SPLINE_NATURAL_) {
qn=0.;
un=0.;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
/* second-order estimate of y'(x_{n-1}) from the last three points */
dy_last =
((*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-2)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y))-
(*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-3)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y)))/
((*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-2)*n_columns+index_x)));
qn=0.5;
un =
(3./(*(array+(n_lines-1)*n_columns+index_x) - *(array+(n_lines-2)*n_columns+index_x)))*
(dy_last-(*(array+(n_lines-1)*n_columns+index_y) - *(array+(n_lines-2)*n_columns+index_y))/
(*(array+(n_lines-1)*n_columns+index_x) - *(array+(n_lines-2)*n_columns+index_x)));
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
free(u); /* fix: do not leak the workspace on this error path */
return _FAILURE_;
}
}
/* back-substitution */
*(array+(n_lines-1)*n_columns+index_ddydx2) =
(un-qn*u[n_lines-2])/(qn* *(array+(n_lines-2)*n_columns+index_ddydx2)+1.0);
for (k=n_lines-2; k>=0; k--)
*(array+k*n_columns+index_ddydx2) = *(array+k*n_columns+index_ddydx2) *
*(array+(k+1)*n_columns+index_ddydx2) + u[k];
free(u);
return _SUCCESS_;
}
/**
 * Same as array_spline(), but x comes from a separate vector instead of a
 * table column.  Writes the spline second derivatives of column index_y
 * into column index_ddydx2.
 *
 * Fixes:
 *  - the workspace `u` was leaked on the two "spline mode not identified"
 *    error returns;
 *  - the _SPLINE_EST_DERIV_ branches read x[2] and x[n_lines-3], so that
 *    mode now rejects n_lines < 3 instead of reading out of bounds.
 */
int array_spline_table_line_to_line(
double * x, /* vector of size x_size */
int n_lines,
double * array,
int n_columns,
int index_y,
int index_ddydx2,
short spline_mode,
ErrorMsg errmsg) {
int i,k;
double p,qn,sig,un;
double * u;
double dy_first;
double dy_last;
if ((spline_mode == _SPLINE_EST_DERIV_) && (n_lines < 3)) {
sprintf(errmsg,"%s(L:%d) n_lines=%d, while _SPLINE_EST_DERIV_ mode needs n_lines >= 3",__func__,__LINE__,n_lines);
return _FAILURE_;
}
u = malloc((n_lines-1) * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (spline_mode == _SPLINE_NATURAL_) {
*(array+0*n_columns+index_ddydx2) = u[0] = 0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
/* second-order estimate of y'(x_0) from the first three points */
dy_first =
((x[2]-x[0])*(x[2]-x[0])*
(*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y))-
(x[1]-x[0])*(x[1]-x[0])*
(*(array+2*n_columns+index_y)-*(array+0*n_columns+index_y)))/
((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));
*(array+0*n_columns+index_ddydx2) = -0.5;
u[0] =
(3./(x[1] - x[0]))*
((*(array+1*n_columns+index_y) - *(array+0*n_columns+index_y))/
(x[1] - x[0])-dy_first);
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
free(u); /* fix: do not leak the workspace on this error path */
return _FAILURE_;
}
}
/* forward elimination of the tridiagonal system */
for (i=1; i < n_lines-1; i++) {
sig = (x[i] - x[i-1]) / (x[i+1] - x[i-1]);
p = sig * *(array+(i-1)*n_columns+index_ddydx2) + 2.0;
*(array+i*n_columns+index_ddydx2) = (sig-1.0)/p;
u[i] = (*(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y))
/ (x[i+1] - x[i])
- (*(array+i*n_columns+index_y) - *(array+(i-1)*n_columns+index_y))
/ (x[i] - x[i-1]);
u[i]= (6.0 * u[i] /
(x[i+1] - x[i-1])
- sig * u[i-1]) / p;
}
if (spline_mode == _SPLINE_NATURAL_) {
qn=0.;
un=0.;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
/* second-order estimate of y'(x_{n-1}) from the last three points */
dy_last =
((x[n_lines-3]-x[n_lines-1])*(x[n_lines-3]-x[n_lines-1])*
(*(array+(n_lines-2)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y))-
(x[n_lines-2]-x[n_lines-1])*(x[n_lines-2]-x[n_lines-1])*
(*(array+(n_lines-3)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y)))/
((x[n_lines-3]-x[n_lines-1])*(x[n_lines-2]-x[n_lines-1])*(x[n_lines-3]-x[n_lines-2]));
qn=0.5;
un =
(3./(x[n_lines-1] - x[n_lines-2]))*
(dy_last-(*(array+(n_lines-1)*n_columns+index_y) - *(array+(n_lines-2)*n_columns+index_y))/
(x[n_lines-1] - x[n_lines-2]));
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
free(u); /* fix: do not leak the workspace on this error path */
return _FAILURE_;
}
}
/* back-substitution */
*(array+(n_lines-1)*n_columns+index_ddydx2) =
(un-qn*u[n_lines-2])/(qn* *(array+(n_lines-2)*n_columns+index_ddydx2)+1.0);
for (k=n_lines-2; k>=0; k--)
*(array+k*n_columns+index_ddydx2) = *(array+k*n_columns+index_ddydx2) *
*(array+(k+1)*n_columns+index_ddydx2) + u[k];
free(u);
return _SUCCESS_;
}
/**
 * Spline many y columns at once: for each of the y_size columns of
 * y_array (layout y_array[index_x*y_size+index_y]), compute the cubic
 * spline second derivatives with respect to x and store them in
 * ddy_array (same layout).  Boundary condition per spline_mode, as in
 * array_spline().
 *
 * Fix: the four workspaces were leaked on error paths -- a failed later
 * allocation returned without freeing the earlier ones, and the two
 * "spline mode not identified" returns freed nothing.  All error paths
 * now release what was allocated.
 */
int array_spline_table_lines(
double * x, /* vector of size x_size */
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_x*y_size+index_y] */
int y_size,
double * ddy_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double * p;
double * qn;
double * un;
double * u;
double sig;
int index_x;
int index_y;
double dy_first;
double dy_last;
u = malloc((x_size-1) * y_size * sizeof(double));
p = malloc(y_size * sizeof(double));
qn = malloc(y_size * sizeof(double));
un = malloc(y_size * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
free(p); free(qn); free(un); /* free(NULL) is a no-op */
return _FAILURE_;
}
if (p == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
free(u); free(qn); free(un);
return _FAILURE_;
}
if (qn == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
free(u); free(p); free(un);
return _FAILURE_;
}
if (un == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
free(u); free(p); free(qn);
return _FAILURE_;
}
index_x=0;
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_x*y_size+index_y] = u[index_x*y_size+index_y] = 0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
/* second-order estimate of y'(x_0) from the first three points */
dy_first =
((x[2]-x[0])*(x[2]-x[0])*
(y_array[1*y_size+index_y]-y_array[0*y_size+index_y])-
(x[1]-x[0])*(x[1]-x[0])*
(y_array[2*y_size+index_y]-y_array[0*y_size+index_y]))/
((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));
ddy_array[index_x*y_size+index_y] = -0.5;
u[index_x*y_size+index_y] =
(3./(x[1] - x[0]))*
((y_array[1*y_size+index_y]-y_array[0*y_size+index_y])/
(x[1] - x[0])-dy_first);
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
free(u); free(p); free(qn); free(un); /* fix: no leak on error */
return _FAILURE_;
}
}
/* forward elimination, all y columns in lockstep */
for (index_x=1; index_x < x_size-1; index_x++) {
sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);
for (index_y=0; index_y < y_size; index_y++) {
p[index_y] = sig * ddy_array[(index_x-1)*y_size+index_y] + 2.0;
ddy_array[index_x*y_size+index_y] = (sig-1.0)/p[index_y];
u[index_x*y_size+index_y] =
(y_array[(index_x+1)*y_size+index_y] - y_array[index_x*y_size+index_y])
/ (x[index_x+1] - x[index_x])
- (y_array[index_x*y_size+index_y] - y_array[(index_x-1)*y_size+index_y])
/ (x[index_x] - x[index_x-1]);
u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
(x[index_x+1] - x[index_x-1])
- sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
}
}
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
qn[index_y]=un[index_y]=0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
/* second-order estimate of y'(x_{n-1}) from the last three points */
dy_last =
((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
(y_array[(x_size-2)*y_size+index_y]-y_array[(x_size-1)*y_size+index_y])-
(x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
(y_array[(x_size-3)*y_size+index_y]-y_array[(x_size-1)*y_size+index_y]))/
((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));
qn[index_y]=0.5;
un[index_y]=
(3./(x[x_size-1] - x[x_size-2]))*
(dy_last-(y_array[(x_size-1)*y_size+index_y] - y_array[(x_size-2)*y_size+index_y])/
(x[x_size-1] - x[x_size-2]));
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
free(u); free(p); free(qn); free(un); /* fix: no leak on error */
return _FAILURE_;
}
}
/* back-substitution */
index_x=x_size-1;
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_x*y_size+index_y] =
(un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
(qn[index_y] * ddy_array[(index_x-1)*y_size+index_y] + 1.0);
}
for (index_x=x_size-2; index_x >= 0; index_x--) {
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_x*y_size+index_y] = ddy_array[index_x*y_size+index_y] *
ddy_array[(index_x+1)*y_size+index_y] + u[index_x*y_size+index_y];
}
}
free(qn);
free(un);
free(p);
free(u);
return _SUCCESS_;
}
/**
 * Same as array_spline_table_lines(), but the spline is built in
 * log(x), log(y): ddlny_array receives the second derivatives of
 * ln y with respect to ln x for every y column.
 *
 * Fix: identical leak pattern to array_spline_table_lines() -- a failed
 * later allocation returned without freeing the earlier workspaces, and
 * the two "spline mode not identified" returns freed nothing.  All error
 * paths now release what was allocated.
 */
int array_logspline_table_lines(
double * x, /* vector of size x_size */
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_x*y_size+index_y] */
int y_size,
double * ddlny_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double * p;
double * qn;
double * un;
double * u;
double sig;
int index_x;
int index_y;
double dy_first;
double dy_last;
u = malloc((x_size-1) * y_size * sizeof(double));
p = malloc(y_size * sizeof(double));
qn = malloc(y_size * sizeof(double));
un = malloc(y_size * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
free(p); free(qn); free(un); /* free(NULL) is a no-op */
return _FAILURE_;
}
if (p == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
free(u); free(qn); free(un);
return _FAILURE_;
}
if (qn == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
free(u); free(p); free(un);
return _FAILURE_;
}
if (un == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
free(u); free(p); free(qn);
return _FAILURE_;
}
index_x=0;
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
ddlny_array[index_x*y_size+index_y] = u[index_x*y_size+index_y] = 0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
/* second-order estimate of dln(y)/dln(x) at the first point */
dy_first =
((log(x[2])-log(x[0]))*(log(x[2])-log(x[0]))*
(log(y_array[1*y_size+index_y])-log(y_array[0*y_size+index_y]))-
(log(x[1])-log(x[0]))*(log(x[1])-log(x[0]))*
(log(y_array[2*y_size+index_y])-log(y_array[0*y_size+index_y])))/
((log(x[2])-log(x[0]))*(log(x[1])-log(x[0]))*(log(x[2])-log(x[1])));
ddlny_array[index_x*y_size+index_y] = -0.5;
u[index_x*y_size+index_y] =
(3./(log(x[1]) - log(x[0])))*
((log(y_array[1*y_size+index_y])-log(y_array[0*y_size+index_y]))/
(log(x[1]) - log(x[0]))-dy_first);
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
free(u); free(p); free(qn); free(un); /* fix: no leak on error */
return _FAILURE_;
}
}
/* forward elimination, all y columns in lockstep */
for (index_x=1; index_x < x_size-1; index_x++) {
sig = (log(x[index_x]) - log(x[index_x-1]))/(log(x[index_x+1]) - log(x[index_x-1]));
for (index_y=0; index_y < y_size; index_y++) {
p[index_y] = sig * ddlny_array[(index_x-1)*y_size+index_y] + 2.0;
ddlny_array[index_x*y_size+index_y] = (sig-1.0)/p[index_y];
u[index_x*y_size+index_y] =
(log(y_array[(index_x+1)*y_size+index_y]) - log(y_array[index_x*y_size+index_y]))
/ (log(x[index_x+1]) - log(x[index_x]))
- (log(y_array[index_x*y_size+index_y]) - log(y_array[(index_x-1)*y_size+index_y]))
/ (log(x[index_x]) - log(x[index_x-1]));
u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
(log(x[index_x+1]) - log(x[index_x-1]))
- sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
}
}
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
qn[index_y]=un[index_y]=0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
/* second-order estimate of dln(y)/dln(x) at the last point */
dy_last =
((log(x[x_size-3])-log(x[x_size-1]))*(log(x[x_size-3])-log(x[x_size-1]))*
(log(y_array[(x_size-2)*y_size+index_y])-log(y_array[(x_size-1)*y_size+index_y]))-
(log(x[x_size-2])-log(x[x_size-1]))*(log(x[x_size-2])-log(x[x_size-1]))*
(log(y_array[(x_size-3)*y_size+index_y])-log(y_array[(x_size-1)*y_size+index_y])))/
((log(x[x_size-3])-log(x[x_size-1]))*(log(x[x_size-2])-log(x[x_size-1]))*(log(x[x_size-3])-log(x[x_size-2])));
qn[index_y]=0.5;
un[index_y]=
(3./(log(x[x_size-1]) - log(x[x_size-2])))*
(dy_last-(log(y_array[(x_size-1)*y_size+index_y]) - log(y_array[(x_size-2)*y_size+index_y]))/
(log(x[x_size-1]) - log(x[x_size-2])));
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
free(u); free(p); free(qn); free(un); /* fix: no leak on error */
return _FAILURE_;
}
}
/* back-substitution */
index_x=x_size-1;
for (index_y=0; index_y < y_size; index_y++) {
ddlny_array[index_x*y_size+index_y] =
(un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
(qn[index_y] * ddlny_array[(index_x-1)*y_size+index_y] + 1.0);
}
for (index_x=x_size-2; index_x >= 0; index_x--) {
for (index_y=0; index_y < y_size; index_y++) {
ddlny_array[index_x*y_size+index_y] = ddlny_array[index_x*y_size+index_y] *
ddlny_array[(index_x+1)*y_size+index_y] + u[index_x*y_size+index_y];
}
}
free(qn);
free(un);
free(p);
free(u);
return _SUCCESS_;
}
int array_spline_table_columns(
                               double * x, /* vector of size x_size */
                               int x_size,
                               double * y_array, /* array of size x_size*y_size with elements
                                                    y_array[index_y*x_size+index_x] */
                               int y_size,
                               double * ddy_array, /* array of size x_size*y_size */
                               short spline_mode,
                               ErrorMsg errmsg
                               ) {

  /** Compute, for each of the y_size columns of y_array, the second
      derivatives with respect to x needed for cubic-spline interpolation,
      storing them in ddy_array (same layout as y_array).
      Standard tridiagonal spline algorithm: forward sweep (coefficients
      kept in ddy_array, right-hand sides in u), then back-substitution.
      spline_mode selects the boundary condition: _SPLINE_NATURAL_
      (zero second derivative at both ends) or _SPLINE_EST_DERIV_
      (end first derivatives estimated from a three-point parabola).
      Returns _SUCCESS_, or _FAILURE_ with errmsg filled. */

  double * p;   /* tridiagonal pivots, one per column */
  double * qn;  /* last-row coefficient, one per column */
  double * un;  /* last-row right-hand side, one per column */
  double * u;   /* decomposed right-hand sides, size (x_size-1)*y_size */
  double sig;
  int index_x;
  int index_y;
  double dy_first;
  double dy_last;

  /* validate inputs BEFORE allocating, so no error exit can leak
     (bug fix: the mode check and the class_test's used to run after
     malloc and return without freeing) */
  if ((spline_mode != _SPLINE_NATURAL_) && (spline_mode != _SPLINE_EST_DERIV_)) {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    return _FAILURE_;
  }

  if (spline_mode == _SPLINE_EST_DERIV_) {
    /* the derivative estimates below divide by these differences */
    class_test(x[2]-x[0]==0.,
               errmsg,
               "x[2]=%g, x[0]=%g, stop to avoid seg fault",x[2],x[0]);
    class_test(x[1]-x[0]==0.,
               errmsg,
               "x[1]=%g, x[0]=%g, stop to avoid seg fault",x[1],x[0]);
    class_test(x[2]-x[1]==0.,
               errmsg,
               "x[2]=%g, x[1]=%g, stop to avoid seg fault",x[2],x[1]);
  }

  u = malloc((x_size-1) * y_size * sizeof(double));
  p = malloc(y_size * sizeof(double));
  qn = malloc(y_size * sizeof(double));
  un = malloc(y_size * sizeof(double));

  /* bug fix: release the allocations that succeeded before failing
     (free(NULL) is a harmless no-op) */
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (p == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
    free(u); free(qn); free(un);
    return _FAILURE_;
  }
  if (qn == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
    free(u); free(p); free(un);
    return _FAILURE_;
  }
  if (un == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
    free(u); free(p); free(qn);
    return _FAILURE_;
  }

  /** boundary condition at the first point */
  index_x=0;

  if (spline_mode == _SPLINE_NATURAL_) {
    for (index_y=0; index_y < y_size; index_y++) {
      ddy_array[index_y*x_size+index_x] = 0.0;
      u[index_x*y_size+index_y] = 0.0;
    }
  }
  else {
    /* _SPLINE_EST_DERIV_ (only possibility after the validation above) */
    for (index_y=0; index_y < y_size; index_y++) {

      /* first derivative at x[0] estimated from a parabola through the
         first three points */
      dy_first =
        ((x[2]-x[0])*(x[2]-x[0])*
         (y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
         (x[1]-x[0])*(x[1]-x[0])*
         (y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
        ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));

      ddy_array[index_y*x_size+index_x] = -0.5;

      u[index_x*y_size+index_y] =
        (3./(x[1] - x[0]))*
        ((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
         (x[1] - x[0])-dy_first);
    }
  }

  /** forward sweep of the tridiagonal solve */
  for (index_x=1; index_x < x_size-1; index_x++) {

    sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);

    for (index_y=0; index_y < y_size; index_y++) {

      p[index_y] = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;

      ddy_array[index_y*x_size+index_x] = (sig-1.0)/p[index_y];

      u[index_x*y_size+index_y] =
        (y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
        / (x[index_x+1] - x[index_x])
        - (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
        / (x[index_x] - x[index_x-1]);

      u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
                                   (x[index_x+1] - x[index_x-1])
                                   - sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
    }
  }

  /** boundary condition at the last point */
  if (spline_mode == _SPLINE_NATURAL_) {
    for (index_y=0; index_y < y_size; index_y++) {
      qn[index_y]=un[index_y]=0.0;
    }
  }
  else {
    /* _SPLINE_EST_DERIV_ */
    for (index_y=0; index_y < y_size; index_y++) {

      /* first derivative at x[x_size-1] from a parabola through the last
         three points */
      dy_last =
        ((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
         (y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
         (x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
         (y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
        ((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));

      qn[index_y]=0.5;

      un[index_y]=
        (3./(x[x_size-1] - x[x_size-2]))*
        (dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
         (x[x_size-1] - x[x_size-2]));
    }
  }

  /** back-substitution */
  index_x=x_size-1;

  for (index_y=0; index_y < y_size; index_y++) {
    ddy_array[index_y*x_size+index_x] =
      (un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
      (qn[index_y] * ddy_array[index_y*x_size+(index_x-1)] + 1.0);
  }

  for (index_x=x_size-2; index_x >= 0; index_x--) {
    for (index_y=0; index_y < y_size; index_y++) {
      ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] *
        ddy_array[index_y*x_size+(index_x+1)] + u[index_x*y_size+index_y];
    }
  }

  free(qn);
  free(p);
  free(u);
  free(un);

  return _SUCCESS_;
}
int array_spline_table_columns2(
                                double * x, /* vector of size x_size */
                                int x_size,
                                double * y_array, /* array of size x_size*y_size with elements
                                                     y_array[index_y*x_size+index_x] */
                                int y_size,
                                double * ddy_array, /* array of size x_size*y_size */
                                short spline_mode,
                                ErrorMsg errmsg
                                ) {

  /** OpenMP-parallel variant of array_spline_table_columns(): each thread
      processes whole columns (index_y) independently, running the complete
      tridiagonal forward sweep and back-substitution for its column.
      The shared scratch arrays p/qn/un are indexed by index_y, so threads
      never touch each other's slots.
      Returns _SUCCESS_, or _FAILURE_ with errmsg filled. */

  double * p;   /* pivot scratch, one slot per column */
  double * qn;  /* last-row coefficient, one slot per column */
  double * un;  /* last-row right-hand side, one slot per column */
  double * u;   /* decomposed right-hand sides, size (x_size-1)*y_size */
  double sig;
  int index_x;
  int index_y;
  double dy_first;
  double dy_last;

  /* bug fix / consistency: the serial sibling rejects unknown modes, but
     this version used to treat anything non-NATURAL as EST_DERIV silently.
     No error return is possible inside the parallel region, so validate
     here, before any allocation (which also keeps error exits leak-free). */
  if ((spline_mode != _SPLINE_NATURAL_) && (spline_mode != _SPLINE_EST_DERIV_)) {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    return _FAILURE_;
  }

  u = malloc((x_size-1) * y_size * sizeof(double));
  p = malloc(y_size * sizeof(double));
  qn = malloc(y_size * sizeof(double));
  un = malloc(y_size * sizeof(double));

  /* bug fix: release the allocations that succeeded before failing
     (free(NULL) is a harmless no-op) */
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    free(p); free(qn); free(un);
    return _FAILURE_;
  }
  if (p == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
    free(u); free(qn); free(un);
    return _FAILURE_;
  }
  if (qn == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
    free(u); free(p); free(un);
    return _FAILURE_;
  }
  if (un == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
    free(u); free(p); free(qn);
    return _FAILURE_;
  }

#pragma omp parallel                                              \
  shared(x,x_size,y_array,y_size,ddy_array,spline_mode,p,qn,un,u) \
  private(index_y,index_x,sig,dy_first,dy_last)
  {

#pragma omp for schedule (dynamic)
    for (index_y=0; index_y < y_size; index_y++) {

      /* boundary condition at the first point */
      if (spline_mode == _SPLINE_NATURAL_) {
        ddy_array[index_y*x_size+0] = 0.0;
        u[0*y_size+index_y] = 0.0;
      }
      else {
        /* _SPLINE_EST_DERIV_: slope at x[0] from a three-point parabola */
        dy_first =
          ((x[2]-x[0])*(x[2]-x[0])*
           (y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
           (x[1]-x[0])*(x[1]-x[0])*
           (y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
          ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));

        ddy_array[index_y*x_size+0] = -0.5;

        u[0*y_size+index_y] =
          (3./(x[1] - x[0]))*
          ((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
           (x[1] - x[0])-dy_first);
      }

      /* forward sweep of the tridiagonal solve */
      for (index_x=1; index_x < x_size-1; index_x++) {

        sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);

        p[index_y] = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;

        ddy_array[index_y*x_size+index_x] = (sig-1.0)/p[index_y];

        u[index_x*y_size+index_y] =
          (y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
          / (x[index_x+1] - x[index_x])
          - (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
          / (x[index_x] - x[index_x-1]);

        u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
                                     (x[index_x+1] - x[index_x-1])
                                     - sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
      }

      /* boundary condition at the last point */
      if (spline_mode == _SPLINE_NATURAL_) {
        qn[index_y]=un[index_y]=0.0;
      }
      else {
        /* _SPLINE_EST_DERIV_: slope at x[x_size-1] from a three-point parabola */
        dy_last =
          ((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
           (y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
           (x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
           (y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
          ((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));

        qn[index_y]=0.5;

        un[index_y]=
          (3./(x[x_size-1] - x[x_size-2]))*
          (dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
           (x[x_size-1] - x[x_size-2]));
      }

      /* back-substitution */
      index_x=x_size-1;

      ddy_array[index_y*x_size+index_x] =
        (un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
        (qn[index_y] * ddy_array[index_y*x_size+(index_x-1)] + 1.0);

      for (index_x=x_size-2; index_x >= 0; index_x--) {
        ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] *
          ddy_array[index_y*x_size+(index_x+1)] + u[index_x*y_size+index_y];
      }
    }
  }

  free(qn);
  free(p);
  free(u);
  free(un);

  return _SUCCESS_;
}
int array_spline_table_one_column(
                                  double * x, /* vector of size x_size */
                                  int x_size,
                                  double * y_array, /* array of size x_size*y_size with elements
                                                       y_array[index_y*x_size+index_x] */
                                  int y_size,
                                  int index_y,
                                  double * ddy_array, /* array of size x_size*y_size */
                                  short spline_mode,
                                  ErrorMsg errmsg
                                  ) {

  /** Compute the second derivatives of the single column index_y of
      y_array with respect to x (cubic-spline tridiagonal sweep +
      back-substitution), writing them into the corresponding column of
      ddy_array. Boundary conditions as in array_spline_table_columns().
      Returns _SUCCESS_, or _FAILURE_ with errmsg filled. */

  double p;        /* current tridiagonal pivot */
  double qn;       /* last-row coefficient */
  double un;       /* last-row right-hand side */
  double * u;      /* decomposed right-hand sides, size x_size-1 */
  double sig;
  int index_x;
  double dy_first; /* estimated y'(x[0])        (_SPLINE_EST_DERIV_ only) */
  double dy_last;  /* estimated y'(x[x_size-1]) (_SPLINE_EST_DERIV_ only) */

  u = malloc((x_size-1) * sizeof(double));
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    return _FAILURE_;
  }

  /** boundary condition at the first point */
  index_x=0;

  if (spline_mode == _SPLINE_NATURAL_) {
    ddy_array[index_y*x_size+index_x] = 0.0;
    u[index_x] = 0.0;
  }
  else {
    if (spline_mode == _SPLINE_EST_DERIV_) {

      /* slope at x[0] from a parabola through the first three points */
      dy_first =
        ((x[2]-x[0])*(x[2]-x[0])*
         (y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
         (x[1]-x[0])*(x[1]-x[0])*
         (y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
        ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));

      ddy_array[index_y*x_size+index_x] = -0.5;

      u[index_x] =
        (3./(x[1] - x[0]))*
        ((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
         (x[1] - x[0])-dy_first);
    }
    else {
      sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
      free(u);  /* bug fix: this error path used to leak u */
      return _FAILURE_;
    }
  }

  /** forward sweep of the tridiagonal solve */
  for (index_x=1; index_x < x_size-1; index_x++) {

    sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);

    p = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;

    ddy_array[index_y*x_size+index_x] = (sig-1.0)/p;

    u[index_x] =
      (y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
      / (x[index_x+1] - x[index_x])
      - (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
      / (x[index_x] - x[index_x-1]);

    u[index_x] = (6.0 * u[index_x] /
                  (x[index_x+1] - x[index_x-1])
                  - sig * u[index_x-1]) / p;
  }

  /** boundary condition at the last point */
  if (spline_mode == _SPLINE_NATURAL_) {
    qn=un=0.0;
  }
  else {
    if (spline_mode == _SPLINE_EST_DERIV_) {

      /* slope at x[x_size-1] from a parabola through the last three points */
      dy_last =
        ((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
         (y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
         (x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
         (y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
        ((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));

      qn=0.5;

      un=
        (3./(x[x_size-1] - x[x_size-2]))*
        (dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
         (x[x_size-1] - x[x_size-2]));
    }
    else {
      sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
      free(u);  /* bug fix: this error path used to leak u */
      return _FAILURE_;
    }
  }

  /** back-substitution */
  index_x=x_size-1;

  ddy_array[index_y*x_size+index_x] =
    (un - qn * u[index_x-1]) /
    (qn * ddy_array[index_y*x_size+(index_x-1)] + 1.0);

  for (index_x=x_size-2; index_x >= 0; index_x--) {
    ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] *
      ddy_array[index_y*x_size+(index_x+1)] + u[index_x];
  }

  free(u);

  return _SUCCESS_;
}
int array_logspline_table_one_column(
                                     double * x, /* vector of size x_size */
                                     int x_size,
                                     int x_stop,
                                     double * y_array, /* array of size x_size*y_size with elements
                                                          y_array[index_y*x_size+index_x] */
                                     int y_size,
                                     int index_y,
                                     double * ddlogy_array, /* array of size x_size*y_size */
                                     short spline_mode,
                                     ErrorMsg errmsg
                                     ) {

  /** Like array_spline_table_one_column(), but splines ln(y) as a function
      of ln(x), and only over the first x_stop points of the table
      (ddlogy_array keeps the full x_size stride). Requires strictly
      positive x and y values since both are passed through log().
      Returns _SUCCESS_, or _FAILURE_ with errmsg filled. */

  double p;        /* current tridiagonal pivot */
  double qn;       /* last-row coefficient */
  double un;       /* last-row right-hand side */
  double * u;      /* decomposed right-hand sides, size x_stop-1 */
  double sig;
  int index_x;
  double dy_first; /* estimated dln(y)/dln(x) at the first point */
  double dy_last;  /* estimated dln(y)/dln(x) at the last used point */

  u = malloc((x_stop-1) * sizeof(double));
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    return _FAILURE_;
  }

  /** boundary condition at the first point */
  index_x=0;

  if (spline_mode == _SPLINE_NATURAL_) {
    ddlogy_array[index_y*x_size+index_x] = 0.0;
    u[index_x] = 0.0;
  }
  else {
    if (spline_mode == _SPLINE_EST_DERIV_) {

      /* log-slope at the first point from a parabola through the first
         three points, in (ln x, ln y) coordinates */
      dy_first =
        ((log(x[2])-log(x[0]))*(log(x[2])-log(x[0]))*
         (log(y_array[index_y*x_size+1])-log(y_array[index_y*x_size+0]))-
         (log(x[1])-log(x[0]))*(log(x[1])-log(x[0]))*
         (log(y_array[index_y*x_size+2])-log(y_array[index_y*x_size+0])))/
        ((log(x[2])-log(x[0]))*(log(x[1])-log(x[0]))*(log(x[2])-log(x[1])));

      ddlogy_array[index_y*x_size+index_x] = -0.5;

      u[index_x] =
        (3./(log(x[1]) - log(x[0])))*
        ((log(y_array[index_y*x_size+1])-log(y_array[index_y*x_size+0]))/
         (log(x[1]) - log(x[0]))-dy_first);
    }
    else {
      sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
      free(u);  /* bug fix: this error path used to leak u */
      return _FAILURE_;
    }
  }

  /** forward sweep of the tridiagonal solve */
  for (index_x=1; index_x < x_stop-1; index_x++) {

    sig = (log(x[index_x]) - log(x[index_x-1]))/(log(x[index_x+1]) - log(x[index_x-1]));

    p = sig * ddlogy_array[index_y*x_size+(index_x-1)] + 2.0;

    ddlogy_array[index_y*x_size+index_x] = (sig-1.0)/p;

    u[index_x] =
      (log(y_array[index_y*x_size+(index_x+1)]) - log(y_array[index_y*x_size+index_x]))
      / (log(x[index_x+1]) - log(x[index_x]))
      - (log(y_array[index_y*x_size+index_x]) - log(y_array[index_y*x_size+(index_x-1)]))
      / (log(x[index_x]) - log(x[index_x-1]));

    u[index_x] = (6.0 * u[index_x] /
                  (log(x[index_x+1]) - log(x[index_x-1]))
                  - sig * u[index_x-1]) / p;
  }

  /** boundary condition at the last used point */
  if (spline_mode == _SPLINE_NATURAL_) {
    qn=un=0.0;
  }
  else {
    if (spline_mode == _SPLINE_EST_DERIV_) {

      /* log-slope at the last used point from a parabola through the last
         three used points */
      dy_last =
        ((log(x[x_stop-3])-log(x[x_stop-1]))*(log(x[x_stop-3])-log(x[x_stop-1]))*
         (log(y_array[index_y*x_size+(x_stop-2)])-log(y_array[index_y*x_size+(x_stop-1)]))-
         (log(x[x_stop-2])-log(x[x_stop-1]))*(log(x[x_stop-2])-log(x[x_stop-1]))*
         (log(y_array[index_y*x_size+(x_stop-3)])-log(y_array[index_y*x_size+(x_stop-1)])))/
        ((log(x[x_stop-3])-log(x[x_stop-1]))*(log(x[x_stop-2])-log(x[x_stop-1]))*
         (log(x[x_stop-3])-log(x[x_stop-2])));

      qn=0.5;

      un=
        (3./(log(x[x_stop-1]) - log(x[x_stop-2])))*
        (dy_last-(log(y_array[index_y*x_size+(x_stop-1)]) - log(y_array[index_y*x_size+(x_stop-2)]))/
         (log(x[x_stop-1]) - log(x[x_stop-2])));
    }
    else {
      sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
      free(u);  /* bug fix: this error path used to leak u */
      return _FAILURE_;
    }
  }

  /** back-substitution */
  index_x=x_stop-1;

  ddlogy_array[index_y*x_size+index_x] =
    (un - qn * u[index_x-1]) /
    (qn * ddlogy_array[index_y*x_size+(index_x-1)] + 1.0);

  for (index_x=x_stop-2; index_x >= 0; index_x--) {
    ddlogy_array[index_y*x_size+index_x] = ddlogy_array[index_y*x_size+index_x] *
      ddlogy_array[index_y*x_size+(index_x+1)] + u[index_x];
  }

  free(u);

  return _SUCCESS_;
}
int array_integrate_all_spline(
                               double * array,
                               int n_columns,
                               int n_lines,
                               int index_x, /** from 0 to (n_columns-1) */
                               int index_y,
                               int index_ddy,
                               double * result,
                               ErrorMsg errmsg) {

  /** Integrate column index_y with respect to column index_x over the full
      table, using the exact integral of the cubic-spline interpolant:
      on each interval of width h,
        integral = h*(y_i+y_{i+1})/2 - h^3*(y''_i+y''_{i+1})/24,
      where y'' is taken from column index_ddy. */

  int i;
  double h;

  *result = 0;

  for (i=0; i < n_lines-1; i++) {

    h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);

    /* bug fix: the h^3/24 spline correction must be SUBTRACTED
       (integrating the spline basis gives int(a^3-a) = -h/4, so the
       second-derivative term carries a minus sign) */
    *result +=
      (array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.-
      (array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.;
  }

  return _SUCCESS_;
}
int array_integrate_all_trapzd_or_spline(
                                         double * array,
                                         int n_columns,
                                         int n_lines,
                                         int index_start_spline,
                                         int index_x, /** from 0 to (n_columns-1) */
                                         int index_y,
                                         int index_ddy,
                                         double * result,
                                         ErrorMsg errmsg) {

  /** Integrate column index_y with respect to column index_x: plain
      trapezoidal rule up to line index_start_spline, then the exact
      cubic-spline quadrature (using second derivatives from column
      index_ddy) for the remaining intervals. */

  int i;
  double h;

  if ((index_start_spline<0) || (index_start_spline>=n_lines)) {
    sprintf(errmsg,"%s(L:%d) index_start_spline outside of range",__func__,__LINE__);
    return _FAILURE_;
  }

  *result = 0;

  /* trapezoidal integration till given index */
  for (i=0; i < index_start_spline; i++) {

    h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);

    *result +=
      (array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.;
  }

  /* then, spline integration */
  for (i=index_start_spline; i < n_lines-1; i++) {

    h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);

    /* bug fix: the h^3/24 spline correction must be SUBTRACTED, matching
       the exact integral of the spline interpolant */
    *result +=
      (array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.-
      (array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.;
  }

  return _SUCCESS_;
}
/**
* Not called.
*/
int array_integrate(
                    double * array,
                    int n_columns,
                    int n_lines,
                    int index_x, /** from 0 to (n_columns-1) */
                    int index_y,
                    int index_int_y_dx,
                    ErrorMsg errmsg) {

  /** Cumulative trapezoidal integral of column index_y with respect to
      column index_x; the running value is stored in column index_int_y_dx
      of every line (zero on the first line). */

  int line;
  double running;

  if ((index_int_y_dx == index_x) || (index_int_y_dx == index_y)) {
    sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d and %d",__func__,__LINE__,index_int_y_dx,index_x,index_y);
    return _FAILURE_;
  }

  running = 0.;
  array[0*n_columns+index_int_y_dx] = running;

  for (line = 1; line < n_lines; line++) {
    running += 0.5 * (array[line*n_columns+index_y] + array[(line-1)*n_columns+index_y])
      * (array[line*n_columns+index_x] - array[(line-1)*n_columns+index_x]);
    array[line*n_columns+index_int_y_dx] = running;
  }

  return _SUCCESS_;
}
/**
* Called by thermodynamics_init().
*/
int array_integrate_ratio(
                          double * array,
                          int n_columns,
                          int n_lines,
                          int index_x, /** from 0 to (n_columns-1) */
                          int index_y1,
                          int index_y2,
                          int index_int_y1_over_y2_dx,
                          ErrorMsg errmsg) {

  /** Cumulative trapezoidal integral of the ratio y1/y2 with respect to
      column index_x; the running value is written into column
      index_int_y1_over_y2_dx of every line (zero on the first line). */

  int line;
  double running;

  if ((index_int_y1_over_y2_dx == index_x) || (index_int_y1_over_y2_dx == index_y1) || (index_int_y1_over_y2_dx == index_y2)) {
    sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d, %d and %d",__func__,__LINE__,index_int_y1_over_y2_dx,index_x,index_y1,index_y2);
    return _FAILURE_;
  }

  running = 0.;
  array[0*n_columns+index_int_y1_over_y2_dx] = running;

  for (line = 1; line < n_lines; line++) {
    running += 0.5 * (array[line*n_columns+index_y1] / array[line*n_columns+index_y2]
                      + array[(line-1)*n_columns+index_y1] / array[(line-1)*n_columns+index_y2])
      * (array[line*n_columns+index_x] - array[(line-1)*n_columns+index_x]);
    array[line*n_columns+index_int_y1_over_y2_dx] = running;
  }

  return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are all columns of the same array
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate(
                      double * array,
                      int n_columns,
                      int n_lines,
                      int index_x, /** from 0 to (n_columns-1) */
                      double x,
                      int * last_index,
                      double * result,
                      int result_size, /** from 1 to n_columns */
                      ErrorMsg errmsg) {

  /** Linear interpolation of the first result_size columns of array at
      abscissa x, where column index_x holds the abscissas (sorted in
      either direction). The lower bracket line is stored in *last_index,
      and result[index_x] is set to x itself. */

  int lo, hi, middle, col;
  double frac;

  lo = 0;
  hi = n_lines-1;

  if (array[lo*n_columns+index_x] < array[hi*n_columns+index_x]) {

    /* abscissas grow with line number */
    if (x < array[lo*n_columns+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array[lo*n_columns+index_x]);
      return _FAILURE_;
    }
    if (x > array[hi*n_columns+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array[hi*n_columns+index_x]);
      return _FAILURE_;
    }

    /* bisection */
    while (hi - lo > 1) {
      middle = (int)(0.5*(lo+hi));
      if (x < array[middle*n_columns+index_x])
        hi = middle;
      else
        lo = middle;
    }
  }
  else {

    /* abscissas decrease with line number */
    if (x < array[hi*n_columns+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array[hi*n_columns+index_x]);
      return _FAILURE_;
    }
    if (x > array[lo*n_columns+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array[lo*n_columns+index_x]);
      return _FAILURE_;
    }

    /* bisection */
    while (hi - lo > 1) {
      middle = (int)(0.5*(lo+hi));
      if (x > array[middle*n_columns+index_x])
        hi = middle;
      else
        lo = middle;
    }
  }

  *last_index = lo;

  frac = (x-array[lo*n_columns+index_x])
    /(array[hi*n_columns+index_x]-array[lo*n_columns+index_x]);

  for (col = 0; col < result_size; col++)
    result[col] = array[lo*n_columns+col] * (1.-frac)
      + frac * array[hi*n_columns+col];

  /* overwrite the abscissa column with the exact requested x */
  result[index_x] = x;

  return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_spline(
                             double * __restrict__ x_array,
                             int n_lines,
                             double * __restrict__ array,
                             double * __restrict__ array_splined,
                             int n_columns,
                             double x,
                             int * __restrict__ last_index,
                             double * __restrict__ result,
                             int result_size, /** from 1 to n_columns */
                             ErrorMsg errmsg) {

  /** Cubic-spline interpolation of the first result_size columns of array
      at abscissa x, using second derivatives from array_splined. x_array
      may be sorted in either direction; bisection locates the bracket,
      whose lower index is stored in *last_index. */

  int lo, hi, middle, col;
  double h, a, b;

  lo = 0;
  hi = n_lines-1;

  if (x_array[lo] < x_array[hi]) {

    /* ascending abscissas */
    if (x < x_array[lo]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[lo]);
      return _FAILURE_;
    }
    if (x > x_array[hi]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[hi]);
      return _FAILURE_;
    }

    while (hi - lo > 1) {
      middle = (int)(0.5*(lo+hi));
      if (x < x_array[middle])
        hi = middle;
      else
        lo = middle;
    }
  }
  else {

    /* descending abscissas */
    if (x < x_array[hi]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[hi]);
      return _FAILURE_;
    }
    if (x > x_array[lo]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[lo]);
      return _FAILURE_;
    }

    while (hi - lo > 1) {
      middle = (int)(0.5*(lo+hi));
      if (x > x_array[middle])
        hi = middle;
      else
        lo = middle;
    }
  }

  *last_index = lo;

  /* standard spline evaluation from bracketing values and second derivatives */
  h = x_array[hi] - x_array[lo];
  b = (x-x_array[lo])/h;
  a = 1-b;

  for (col = 0; col < result_size; col++)
    result[col] =
      a * array[lo*n_columns+col] +
      b * array[hi*n_columns+col] +
      ((a*a*a-a)* array_splined[lo*n_columns+col] +
       (b*b*b-b)* array_splined[hi*n_columns+col])*h*h/6.;

  return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_linear(
                             double * x_array,
                             int n_lines,
                             double * array,
                             int n_columns,
                             double x,
                             int * last_index,
                             double * result,
                             int result_size, /** from 1 to n_columns */
                             ErrorMsg errmsg) {

  /** Linear interpolation of the first result_size columns of array at
      abscissa x, with abscissas kept in the separate vector x_array
      (sorted in either direction). The lower bracket index is stored in
      *last_index. */

  int lo, hi, middle, col;
  double h, a, b;

  lo = 0;
  hi = n_lines-1;

  if (x_array[lo] < x_array[hi]) {

    /* ascending abscissas */
    if (x < x_array[lo]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[lo]);
      return _FAILURE_;
    }
    if (x > x_array[hi]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[hi]);
      return _FAILURE_;
    }

    while (hi - lo > 1) {
      middle = (int)(0.5*(lo+hi));
      if (x < x_array[middle])
        hi = middle;
      else
        lo = middle;
    }
  }
  else {

    /* descending abscissas */
    if (x < x_array[hi]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[hi]);
      return _FAILURE_;
    }
    if (x > x_array[lo]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[lo]);
      return _FAILURE_;
    }

    while (hi - lo > 1) {
      middle = (int)(0.5*(lo+hi));
      if (x > x_array[middle])
        hi = middle;
      else
        lo = middle;
    }
  }

  *last_index = lo;

  h = x_array[hi] - x_array[lo];
  b = (x-x_array[lo])/h;
  a = 1-b;

  for (col = 0; col < result_size; col++)
    result[col] =
      a * array[lo*n_columns+col] +
      b * array[hi*n_columns+col];

  return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_logspline(
                                double * x_array,
                                int n_lines,
                                double * array,
                                double * array_logsplined,
                                int n_columns,
                                double x,
                                int * last_index,
                                double * result,
                                int result_size, /** from 1 to n_columns */
                                ErrorMsg errmsg) {

  /** Spline interpolation of ln(y_i) versus ln(x) for the first
      result_size columns of array, exponentiated back before returning.
      Requires strictly positive x_array and array values. Second
      derivatives of ln(y) come from array_logsplined; x_array may be
      sorted in either direction. */

  int lo, hi, middle, col;
  double h, a, b;

  lo = 0;
  hi = n_lines-1;

  if (x_array[lo] < x_array[hi]) {

    /* ascending abscissas */
    if (x < x_array[lo]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[lo]);
      return _FAILURE_;
    }
    if (x > x_array[hi]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[hi]);
      return _FAILURE_;
    }

    while (hi - lo > 1) {
      middle = (int)(0.5*(lo+hi));
      if (x < x_array[middle])
        hi = middle;
      else
        lo = middle;
    }
  }
  else {

    /* descending abscissas */
    if (x < x_array[hi]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[hi]);
      return _FAILURE_;
    }
    if (x > x_array[lo]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[lo]);
      return _FAILURE_;
    }

    while (hi - lo > 1) {
      middle = (int)(0.5*(lo+hi));
      if (x > x_array[middle])
        hi = middle;
      else
        lo = middle;
    }
  }

  *last_index = lo;

  /* spline evaluation in (ln x, ln y) space, then exponentiate */
  h = log(x_array[hi]) - log(x_array[lo]);
  b = (log(x)-log(x_array[lo]))/h;
  a = 1-b;

  for (col = 0; col < result_size; col++)
    result[col] = exp(
      a * log(array[lo*n_columns+col]) +
      b * log(array[hi*n_columns+col]) +
      ((a*a*a-a)* array_logsplined[lo*n_columns+col] +
       (b*b*b-b)* array_logsplined[hi*n_columns+col])*h*h/6.);

  return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
*
*/
int array_interpolate_spline_one_column(
                                        double * x_array,
                                        int x_size,
                                        double * y_array, /* array of size x_size*y_size with elements
                                                             y_array[index_y*x_size+index_x] */
                                        int y_size,
                                        int index_y,
                                        double * ddy_array, /* array of size x_size*y_size */
                                        double x, /* input */
                                        double * y, /* output */
                                        ErrorMsg errmsg
                                        ) {

  /** Cubic-spline interpolation of the single column index_y of y_array at
      abscissa x, with second derivatives taken from the matching column of
      ddy_array. x_array may be sorted in either direction. */

  int lo, hi, middle;
  double h, a, b;

  lo = 0;
  hi = x_size-1;

  if (x_array[lo] < x_array[hi]) {

    /* ascending abscissas */
    if (x < x_array[lo]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[lo]);
      return _FAILURE_;
    }
    if (x > x_array[hi]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[hi]);
      return _FAILURE_;
    }

    while (hi - lo > 1) {
      middle = (int)(0.5*(lo+hi));
      if (x < x_array[middle])
        hi = middle;
      else
        lo = middle;
    }
  }
  else {

    /* descending abscissas */
    if (x < x_array[hi]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[hi]);
      return _FAILURE_;
    }
    if (x > x_array[lo]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[lo]);
      return _FAILURE_;
    }

    while (hi - lo > 1) {
      middle = (int)(0.5*(lo+hi));
      if (x > x_array[middle])
        hi = middle;
      else
        lo = middle;
    }
  }

  h = x_array[hi] - x_array[lo];
  b = (x-x_array[lo])/h;
  a = 1-b;

  *y =
    a * y_array[index_y * x_size + lo] +
    b * y_array[index_y * x_size + hi] +
    ((a*a*a-a)* ddy_array[index_y * x_size + lo] +
     (b*b*b-b)* ddy_array[index_y * x_size + hi])*h*h/6.;

  return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
*
*/
/* Interpolate column index_y of y_array at x with a cubic spline, falling
   back to linear inter/extrapolation through the LAST TWO tabulated points
   when x lies below x_array[0] or above x_array[x_size-2].
   NOTE(review): the linear branch therefore also covers x inside the last
   tabulated interval (x_array[x_size-2], x_array[x_size-1]] and, when
   x < x_array[0], extrapolates from the far end of the table — presumably
   intentional for this caller, but confirm. */
int array_interpolate_extrapolate_spline_one_column(
double * x_array,
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
int index_y,
double * ddy_array, /* array of size x_size*y_size */
double x, /* input */
double * y, /* output */
ErrorMsg errmsg
) {
int inf,sup,mid;
double h,a,b;
/* outside the spline's safe range: straight line through the last two points */
if (x > x_array[x_size-2] || x < x_array[0]) {
/*interpolate/extrapolate linearly y as a function of x*/
h = x_array[x_size-1] - x_array[x_size-2];
b = (x-x_array[x_size-2])/h;
a = 1-b;
*y = a * y_array[index_y * x_size + (x_size-2)] +
b * y_array[index_y * x_size + (x_size-1)];
}
else {
/*interpolate y as a function of x with a spline*/
/* bisection to find the bracketing pair; x_array may be sorted either way */
inf=0;
sup=x_size-1;
if (x_array[inf] < x_array[sup]){
/* ascending abscissas */
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
/* descending abscissas */
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
/* standard spline evaluation from bracketing values and second derivatives */
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
*y =
a * y_array[index_y * x_size + inf] +
b * y_array[index_y * x_size + sup] +
((a*a*a-a)* ddy_array[index_y * x_size + inf] +
(b*b*b-b)* ddy_array[index_y * x_size + sup])*h*h/6.;
}
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
*
*/
/* Interpolate column index_y of y_array at x, working in (ln x, ln y)
   coordinates: cubic-spline evaluation inside the first x_stop tabulated
   points, and for x beyond x_array[x_stop-1] a straight-line continuation
   in log-log space whose slope is the spline's end derivative
   (finite-difference slope plus the h/6*(y''_{n-2}+2*y''_{n-1}) spline
   correction). Requires positive x and y values (log/exp used throughout). */
int array_interpolate_extrapolate_logspline_loglinear_one_column(
double * x_array,
int x_size,
int x_stop,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
int index_y,
double * ddlogy_array, /* array of size x_size*y_size */
double x, /* input */
double * y, /* output */
ErrorMsg errmsg
) {
int inf,sup,mid;
double h,a,b;
if (x > x_array[x_stop-1]) {
/*interpolate/extrapolate linearly ln(y) as a function of ln(x)*/
h = log(x_array[x_stop-1]) - log(x_array[x_stop-2]);
b = (log(x)-log(x_array[x_stop-2]))/h;
a = 1-b;
/* older two-point log-linear version, kept for reference: */
/* *y = exp(a * log(y_array[index_y * x_size + (x_stop-2)]) + */
/* b * log(y_array[index_y * x_size + (x_stop-1)])); */
/* tangent continuation: slope at the last point is the finite-difference
   slope corrected by the spline's second derivatives, so the extrapolation
   joins the spline with a continuous first derivative */
*y = exp(log(y_array[index_y * x_size + (x_stop-1)])
+(log(x)-log(x_array[x_stop-1]))
*((log(y_array[index_y * x_size + (x_stop-1)])-log(y_array[index_y * x_size + (x_stop-2)]))/h
+h/6.*(ddlogy_array[index_y * x_size + (x_stop-2)]+2.*ddlogy_array[index_y * x_size + (x_stop-1)])));
}
else {
/*interpolate ln(y) as a function of ln(x) with a spline*/
/* bisection on the raw x values (monotonic log keeps the ordering) */
inf=0;
sup=x_stop-1;
if (x_array[inf] < x_array[sup]){
/* ascending abscissas */
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
/* descending abscissas */
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
/* spline evaluation in (ln x, ln y) space, then exponentiate */
h = log(x_array[sup]) - log(x_array[inf]);
b = (log(x)-log(x_array[inf]))/h;
a = 1-b;
*y = exp(a * log(y_array[index_y * x_size + inf]) +
b * log(y_array[index_y * x_size + sup]) +
((a*a*a-a)* ddlogy_array[index_y * x_size + inf] +
(b*b*b-b)* ddlogy_array[index_y * x_size + sup])*h*h/6.);
}
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are all columns of the same array, x is arranged in growing order, and the point x is presumably close to the previous point x from the last call of this function.
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_growing_closeby(
                                      double * array,
                                      int n_columns,
                                      int n_lines,
                                      int index_x, /** from 0 to (n_columns-1) */
                                      double x,
                                      int * last_index,
                                      double * result,
                                      int result_size, /** from 1 to n_columns */
                                      ErrorMsg errmsg) {

  /** Linear interpolation of the first result_size columns at x, assuming
      the abscissa column index_x grows with line number and that x is
      close to the bracket found by the previous call: instead of
      bisecting, the bracket stored in *last_index is slid down or up one
      line at a time. result[index_x] is set to x itself. */

  int lo, hi, col;
  double frac;

  /* slide down while x lies below the current lower edge */
  lo = *last_index;
  while (x < array[lo*n_columns+index_x]) {
    lo--;
    if (lo < 0) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
              x,array[index_x]);
      return _FAILURE_;
    }
  }

  /* slide up while x lies above the current upper edge */
  hi = lo+1;
  while (x > array[hi*n_columns+index_x]) {
    hi++;
    if (hi > (n_lines-1)) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
              x,array[(n_lines-1)*n_columns+index_x]);
      return _FAILURE_;
    }
  }
  lo = hi-1;

  *last_index = lo;

  frac = (x-array[lo*n_columns+index_x])
    /(array[hi*n_columns+index_x]-array[lo*n_columns+index_x]);

  for (col = 0; col < result_size; col++)
    result[col] = array[lo*n_columns+col] * (1.-frac)
      + frac * array[hi*n_columns+col];

  /* overwrite the abscissa column with the exact requested x */
  result[index_x] = x;

  return _SUCCESS_;
}
/**
* interpolate to get y(x), when x and y are two columns of the same array, x is arranged in growing order, and the point x is presumably close to the previous point x from the last call of this function.
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_one_growing_closeby(
                                          double * array,
                                          int n_columns,
                                          int n_lines,
                                          int index_x, /** from 0 to (n_columns-1) */
                                          double x,
                                          int * last_index,
                                          int index_y,
                                          double * result,
                                          ErrorMsg errmsg) {

  /** Linear interpolation of the single column index_y at x, using the
      same slide-from-previous-bracket search as
      array_interpolate_growing_closeby() (abscissa column must grow with
      line number). */

  int lo, hi;
  double frac;

  /* slide down while x lies below the current lower edge */
  lo = *last_index;
  while (x < array[lo*n_columns+index_x]) {
    lo--;
    if (lo < 0) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
              x,array[index_x]);
      return _FAILURE_;
    }
  }

  /* slide up while x lies above the current upper edge */
  hi = lo+1;
  while (x > array[hi*n_columns+index_x]) {
    hi++;
    if (hi > (n_lines-1)) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
              x,array[(n_lines-1)*n_columns+index_x]);
      return _FAILURE_;
    }
  }
  lo = hi-1;

  *last_index = lo;

  frac = (x-array[lo*n_columns+index_x])
    /(array[hi*n_columns+index_x]-array[lo*n_columns+index_x]);

  *result = array[lo*n_columns+index_y] * (1.-frac) + array[hi*n_columns+index_y] * frac;

  return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are all columns of the same array, x is arranged in growing order, and the point x is presumably very close to the previous point x from the last call of this function.
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_spline_growing_closeby(
                                             double * x_array,
                                             int n_lines,
                                             double * array,
                                             double * array_splined,
                                             int n_columns,
                                             double x,
                                             int * last_index,
                                             double * result,
                                             int result_size, /** from 1 to n_columns */
                                             ErrorMsg errmsg) {

  /** Cubic-spline interpolation of the first result_size columns at x,
      assuming x_array grows and x is very close to the bracket of the
      previous call: the bracket stored in *last_index is slid one step at
      a time rather than located by bisection. Second derivatives come
      from array_splined. */

  int lo, hi, col;
  double h, a, b;

  lo = *last_index;

  class_test(lo<0 || lo>(n_lines-1),
             errmsg,
             "*lastindex=%d out of range [0:%d]\n",lo,n_lines-1);

  /* slide down while x lies below the current lower edge */
  while (x < x_array[lo]) {
    lo--;
    if (lo < 0) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
              x,x_array[0]);
      return _FAILURE_;
    }
  }

  /* slide up while x lies above the current upper edge */
  hi = lo+1;
  while (x > x_array[hi]) {
    hi++;
    if (hi > (n_lines-1)) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
              x,x_array[n_lines-1]);
      return _FAILURE_;
    }
  }
  lo = hi-1;

  *last_index = lo;

  /* standard spline evaluation on the located interval */
  h = x_array[hi] - x_array[lo];
  b = (x-x_array[lo])/h;
  a = 1-b;

  for (col = 0; col < result_size; col++)
    result[col] =
      a * array[lo*n_columns+col] +
      b * array[hi*n_columns+col] +
      ((a*a*a-a)* array_splined[lo*n_columns+col] +
       (b*b*b-b)* array_splined[hi*n_columns+col])*h*h/6.;

  return _SUCCESS_;
}
/**
 * interpolate to get y_i(x), when x and y_i are all columns of the same
 * array, x is arranged in growing order, and the point x is presumably close
 * (but maybe not so close) to the previous point x from the last call of this
 * function.
 *
 * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
 */
int array_interpolate_spline_growing_hunt(
                                          double * x_array,
                                          int n_lines,
                                          double * array,
                                          double * array_splined,
                                          int n_columns,
                                          double x,
                                          int * last_index,
                                          double * result,
                                          int result_size, /** from 1 to n_columns */
                                          ErrorMsg errmsg) {

  int inf,sup,mid,i,inc;
  double h,a,b;

  inc=1;

  if (x >= x_array[*last_index]) {

    if (x > x_array[n_lines-1]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
              x,x_array[n_lines-1]);
      return _FAILURE_;
    }

    /* try closest neighboor upward */
    inf = *last_index;
    sup = inf + inc;

    /* Bug fix: when *last_index is already the last line (possible if
       x == x_array[n_lines-1]), sup would be n_lines and x_array[sup] an
       out-of-bounds read; clamp the bracket to the last valid interval. */
    if (sup > n_lines-1) {
      sup = n_lines-1;
      inf = sup-1;
    }

    if (x > x_array[sup]) {
      /* hunt upward with geometrically growing steps */
      while (x > x_array[sup]) {
        inf = sup;
        inc += 1;
        sup += inc;
        if (sup > n_lines-1) {
          sup = n_lines-1;
        }
      }
      /* bisect inside the bracket found by the hunt */
      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x < x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }
  }
  else {

    if (x < x_array[0]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
              x,x_array[0]);
      return _FAILURE_;
    }

    /* try closest neighboor downward */
    sup = *last_index;
    inf = sup - inc;

    /* Defensive clamp: cannot trigger when the bound check above passed
       (this branch requires x < x_array[*last_index], so *last_index==0
       already returned an error), but keep the bracket valid in any case. */
    if (inf < 0) {
      inf = 0;
      sup = 1;
    }

    if (x < x_array[inf]) {
      /* hunt downward with geometrically growing steps */
      while (x < x_array[inf]) {
        sup = inf;
        inc += 1;
        inf -= inc;
        if (inf < 0) {
          inf = 0;
        }
      }
      /* bisect inside the bracket found by the hunt */
      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x < x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }
  }

  /* remember the bracket for the next call */
  *last_index = inf;

  /* cubic spline interpolation on the interval [inf,sup] */
  h = x_array[sup] - x_array[inf];
  b = (x-x_array[inf])/h;
  a = 1-b;

  for (i=0; i<result_size; i++)
    *(result+i) =
      a * *(array+inf*n_columns+i) +
      b * *(array+sup*n_columns+i) +
      ((a*a*a-a)* *(array_splined+inf*n_columns+i) +
       (b*b*b-b)* *(array_splined+sup*n_columns+i))*h*h/6.;

  return _SUCCESS_;

}
/**
 * interpolate linearily to get y_i(x), when x and y_i are in two different arrays
 *
 * Called by transfer_interpolate_sources(); transfer_functions_at_k(); perturb_sources_at_eta().
 */
int array_interpolate_two(
                          double * array_x,
                          int n_columns_x,
                          int index_x,     /** from 0 to (n_columns_x-1) */
                          double * array_y,
                          int n_columns_y,
                          int n_lines,     /** must be the same for array_x and array_y */
                          double x,
                          double * result,
                          int result_size, /** from 1 to n_columns_y */
                          ErrorMsg errmsg) {

  int low,up,mid,i;
  double w;

  low = 0;
  up = n_lines-1;

  if (array_x[low*n_columns_x+index_x] < array_x[up*n_columns_x+index_x]) {

    /* x column in growing order: check bounds, then bisect */
    if (x < array_x[low*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[low*n_columns_x+index_x]);
      return _FAILURE_;
    }
    if (x > array_x[up*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[up*n_columns_x+index_x]);
      return _FAILURE_;
    }
    while (up-low > 1) {
      mid=(int)(0.5*(low+up));
      if (x < array_x[mid*n_columns_x+index_x])
        up=mid;
      else
        low=mid;
    }
  }
  else {

    /* x column in decreasing order: bounds are swapped, bisection reversed */
    if (x < array_x[up*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[up*n_columns_x+index_x]);
      return _FAILURE_;
    }
    if (x > array_x[low*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[low*n_columns_x+index_x]);
      return _FAILURE_;
    }
    while (up-low > 1) {
      mid=(int)(0.5*(low+up));
      if (x > array_x[mid*n_columns_x+index_x])
        up=mid;
      else
        low=mid;
    }
  }

  /* linear weight of the 'up' line */
  w=(x-array_x[low*n_columns_x+index_x])/(array_x[up*n_columns_x+index_x]-array_x[low*n_columns_x+index_x]);

  /* array_y is addressed column-major here: value i of line j is array_y[i*n_lines+j] */
  for (i=0; i<result_size; i++)
    result[i] = array_y[i*n_lines+low] * (1.-w)
      + w * array_y[i*n_lines+up];

  return _SUCCESS_;

}
/**
 * Same as array_interpolate_two, but with order of indices exchanged in
 * array_y (row-major: value i of line j is array_y[j*n_columns_y+i]).
 */
int array_interpolate_two_bis(
                              double * array_x,
                              int n_columns_x,
                              int index_x,     /** from 0 to (n_columns_x-1) */
                              double * array_y,
                              int n_columns_y,
                              int n_lines,     /** must be the same for array_x and array_y */
                              double x,
                              double * result,
                              int result_size, /** from 1 to n_columns_y */
                              ErrorMsg errmsg) {

  int low,up,mid,i;
  double w;

  low = 0;
  up = n_lines-1;

  if (array_x[low*n_columns_x+index_x] < array_x[up*n_columns_x+index_x]) {

    /* x column in growing order: check bounds, then bisect */
    if (x < array_x[low*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[low*n_columns_x+index_x]);
      return _FAILURE_;
    }
    if (x > array_x[up*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[up*n_columns_x+index_x]);
      return _FAILURE_;
    }
    while (up-low > 1) {
      mid=(int)(0.5*(low+up));
      if (x < array_x[mid*n_columns_x+index_x])
        up=mid;
      else
        low=mid;
    }
  }
  else {

    /* x column in decreasing order: bounds are swapped, bisection reversed */
    if (x < array_x[up*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[up*n_columns_x+index_x]);
      return _FAILURE_;
    }
    if (x > array_x[low*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[low*n_columns_x+index_x]);
      return _FAILURE_;
    }
    while (up-low > 1) {
      mid=(int)(0.5*(low+up));
      if (x > array_x[mid*n_columns_x+index_x])
        up=mid;
      else
        low=mid;
    }
  }

  /* linear weight of the 'up' line */
  w=(x-array_x[low*n_columns_x+index_x])/(array_x[up*n_columns_x+index_x]-array_x[low*n_columns_x+index_x]);

  /* row-major access into array_y */
  for (i=0; i<result_size; i++)
    result[i] = array_y[low*n_columns_y+i] * (1.-w)
      + w * array_y[up*n_columns_y+i];

  return _SUCCESS_;

}
/**
 * interpolate linearily to get y(x), when x is a separate one-column vector
 * and y is one column of a (column-major) array
 *
 * Called by transfer_interpolate_sources(); transfer_functions_at_k(); perturb_sources_at_eta().
 */
int array_interpolate_two_arrays_one_column(
                                            double * array_x, /* assumed to be a vector (i.e. one column array) */
                                            double * array_y,
                                            int n_columns_y,
                                            int index_y, /* between 0 and (n_columns_y-1) */
                                            int n_lines, /** must be the same for array_x and array_y */
                                            double x,
                                            double * result,
                                            ErrorMsg errmsg) {

  int low,up,mid;
  double w;

  low = 0;
  up = n_lines-1;

  if (array_x[low] < array_x[up]) {

    /* growing abscissa: check bounds, then bisect */
    class_test(x < array_x[low],
               errmsg,
               "x=%e < x_min=%e",x,array_x[low]);
    class_test(x > array_x[up],
               errmsg,
               "x=%e > x_max=%e",x,array_x[up]);
    while (up-low > 1) {
      mid=(int)(0.5*(low+up));
      if (x < array_x[mid])
        up=mid;
      else
        low=mid;
    }
  }
  else {

    /* decreasing abscissa: bounds are swapped, bisection reversed */
    class_test(x < array_x[up],
               errmsg,
               "x=%e < x_min=%e",x,array_x[up]);
    class_test(x > array_x[low],
               errmsg,
               "x=%e > x_max=%e",x,array_x[low]);
    while (up-low > 1) {
      mid=(int)(0.5*(low+up));
      if (x > array_x[mid])
        up=mid;
      else
        low=mid;
    }
  }

  /* linear weight of the 'up' line */
  w=(x-array_x[low])/(array_x[up]-array_x[low]);

  /* array_y is stored column-major: y of line j is array_y[index_y*n_lines+j] */
  *result = array_y[index_y*n_lines+low] * (1.-w)
    + w * array_y[index_y*n_lines+up];

  return _SUCCESS_;

}
/**
 * Linear interpolation in a table whose abscissae are equally spaced
 * between x_min and x_max.
 *
 * Called by transfer_solve().
 */
int array_interpolate_equal(
                            double * array,
                            int n_columns,
                            int n_lines,
                            double x,
                            double x_min,
                            double x_max,
                            double * result,
                            ErrorMsg errmsg) {

  int index_minus,i;
  double x_step,x_minus,weight;

  if (x < x_min) {
    sprintf(errmsg,"%s(L:%d) : x out of bounds: x=%e,x_min=%e",__func__,__LINE__,x,x_min);
    return _FAILURE_;
  }

  if (x > x_max) {
    sprintf(errmsg,"%s(L:%d) : x out of bounds: x=%e,x_max=%e",__func__,__LINE__,x,x_max);
    return _FAILURE_;
  }

  /* robustness: with a single line there is no interval to interpolate on
     (and x_step below would be a division by zero); return that line */
  if (n_lines < 2) {
    for (i=0; i<n_columns; i++)
      result[i] = array[i];
    return _SUCCESS_;
  }

  x_step = (x_max-x_min)/(n_lines-1);
  index_minus = (int)((x-x_min)/x_step);

  /* Bug fix: when x == x_max the truncation gives index_minus = n_lines-1,
     and line index_minus+1 below would be read out of bounds */
  if (index_minus > n_lines-2) index_minus = n_lines-2;

  /* Bug fix: the abscissa of line index_minus is x_min + index_minus*x_step;
     the original omitted x_min, giving wrong weights whenever x_min != 0 */
  x_minus = x_min + index_minus * x_step;

  /* linear weight of line index_minus+1 */
  weight = (x-x_minus) / x_step;

  for (i=0; i<n_columns; i++)
    result[i] = *(array+n_columns*index_minus+i)*(1.-weight)
      + *(array+n_columns*(index_minus+1)+i)*weight;

  return _SUCCESS_;

}
/**
 * Cubic (4-point Lagrange) interpolation in a table with equally spaced
 * abscissae x0 + k*dx, k = 0..Nx-1; works for either sign of dx.
 */
int array_interpolate_cubic_equal(
                                  double x0,
                                  double dx,
                                  double *yarray,
                                  int Nx,
                                  double x,
                                  double * result,
                                  ErrorMsg errmsg) {

  int i;
  double frac;
  double *node;

  /* range checks, one for each sign of dx */
  class_test((dx > 0 && (x<x0 || x>x0+dx*(Nx-1))),
             errmsg,
             "x=%e out of range [%e %e]",x,x0,x0+dx*(Nx-1));
  class_test((dx < 0 && (x>x0 || x<x0+dx*(Nx-1))),
             errmsg,
             "x=%e out of range [%e %e]",x,x0+dx*(Nx-1),x0);

  /* index of the node just below x, clipped so that the 4-point stencil
     [i-1, i, i+1, i+2] stays inside the table */
  i = (int)floor((x-x0)/dx);
  if (i<1) i=1;
  if (i>Nx-3) i=Nx-3;

  /* offset of x inside interval [i, i+1], in units of dx */
  frac = (x-x0)/dx-i;

  /* 4-point Lagrange formula on the stencil starting at node i-1 */
  node = yarray + (i-1);
  *result=-node[0]*frac*(1.-frac)*(2.-frac)/6.
    +node[1]*(1.+frac)*(1.-frac)*(2.-frac)/2.
    +node[2]*(1.+frac)*frac*(2.-frac)/2.
    +node[3]*(1.+frac)*frac*(frac-1.)/6.;

  return _SUCCESS_;

}
/**
 * Fit the parabola y = cq*x^2 + cl*x + cc through the three points
 * (x1,y1), (x2,y2), (x3,y3), then evaluate it together with its first and
 * second derivatives at x. The three abscissae must be pairwise distinct.
 */
int array_interpolate_parabola(double x1,
                               double x2,
                               double x3,
                               double x,
                               double y1,
                               double y2,
                               double y3,
                               double * y,
                               double * dy,
                               double * ddy,
                               ErrorMsg errmsg) {

  double cq,cl,cc; /* quadratic, linear and constant coefficients */

  /*
    Solving cq x_i**2 + cl x_i + cc = y_i at the three nodes:
    cq (x1**2-x2**2) + cl (x1-x2) = y1-y2
    cq (x3**2-x2**2) + cl (x3-x2) = y3-y2
    eliminating cq gives
    cl = [(y1-y2)(x3**2-x2**2) - (y3-y2)(x1**2-x2**2)]/(x1-x2)(x3-x2)(x3-x1)
  */

  cl = ((y1-y2)*(x3-x2)*(x3+x2) - (y3-y2)*(x1-x2)*(x1+x2))/(x1-x2)/(x3-x2)/(x3-x1);
  cq = (y1-y2-cl*(x1-x2))/(x1-x2)/(x1+x2);
  cc = y2 - cl*x2 - cq*x2*x2;

  /* value and derivatives of the parabola at x */
  *y = cq*x*x + cl*x + cc;
  *dy = 2.*cq*x + cl;
  *ddy = 2.*cq;

  return _SUCCESS_;

}
/**
 * Trapezoidal integral of column index_y with respect to column index_x,
 * accumulated over all consecutive lines of the array.
 *
 * Called by transfer_solve().
 */
int array_integrate_all(
                        double * array,
                        int n_columns,
                        int n_lines,
                        int index_x, /** from 0 to (n_columns-1) */
                        int index_y,
                        double *result) {

  int i;
  double total;

  total = 0.;

  /* accumulate 0.5*(y_i + y_{i-1})*(x_i - x_{i-1}) over each interval */
  for (i=1; i<n_lines; i++)
    total += 0.5 * (array[i*n_columns+index_y] + array[(i-1)*n_columns+index_y])
      * (array[i*n_columns+index_x] - array[(i-1)*n_columns+index_x]);

  *result = total;

  return _SUCCESS_;

}
/**
 * Savitzky-Golay smoothing of one eta-slice of a (k,eta) array, using a
 * window of half-width 'radius'. Supported half-widths are 3 to 7; the
 * hard-coded tables below are the standard quadratic/cubic Savitzky-Golay
 * coefficients with their normalization 'weigth'.
 */
int array_smooth_trg(double * array,
                     int k_size,
                     int starting_k,
                     int eta_size,  /* (unused here; kept for interface compatibility) */
                     int index_eta,
                     int radius, /* from 3 to 7 */
                     ErrorMsg errmsg) {

  double * smooth;  /* temporary buffer for the smoothed slice */
  int i,j,jmin,jmax;
  double weigth;    /* normalization of the coefficient table */
  double *coeff;    /* the 2*radius+1 Savitzky-Golay coefficients */

  smooth=malloc(k_size*sizeof(double));
  if (smooth == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate smooth",__func__,__LINE__);
    return _FAILURE_;
  }

  class_calloc(coeff,2*radius+1,sizeof(double),errmsg);

  switch(radius){
  case 3:
    weigth = 21;
    coeff[0] = -2;
    coeff[1] = 3;
    coeff[2] = 6;
    coeff[3] = 7;
    coeff[4] = 6;
    coeff[5] = 3;
    coeff[6] = -2;
    break;
  case 4:
    weigth = 231;
    coeff[0] = -21;
    coeff[1] = 14;
    coeff[2] = 39;
    coeff[3] = 54;
    coeff[4] = 59;
    coeff[5] = 54;
    coeff[6] = 39;
    coeff[7] = 14;
    coeff[8] = -21;
    break;
  case 5:
    weigth = 429;
    coeff[0] = -36;
    coeff[1] = 9;
    coeff[2] = 44;
    coeff[3] = 69;
    coeff[4] = 84;
    coeff[5] = 89;
    coeff[6] = 84;
    coeff[7] = 69;
    coeff[8] = 44;
    coeff[9] = 9;
    coeff[10] = -36;
    break;
  case 6:
    weigth = 143;
    coeff[0] = -11;
    coeff[1] = 0;
    coeff[2] = 9;
    coeff[3] = 16;
    coeff[4] = 21;
    coeff[5] = 24;
    coeff[6] = 25;
    coeff[7] = 24;
    coeff[8] = 21;
    coeff[9] = 16;
    coeff[10] = 9;
    coeff[11] = 0;
    coeff[12] = -11;
    break;
  case 7:
    weigth = 1105;
    coeff[0] = -78;
    coeff[1] = -13;
    coeff[2] = 42;
    coeff[3] = 87;
    coeff[4] = 122;
    coeff[5] = 147;
    coeff[6] = 162;
    coeff[7] = 167;
    coeff[8] = 162;
    coeff[9] = 147;
    coeff[10] = 122;
    coeff[11] = 87;
    coeff[12] = 42;
    coeff[13] = -13;
    coeff[14] = -78;
    break;
  default:
    /* Bug fix: free both buffers before bailing out (they leaked here), and
       correct the message, which said "between 3 4 5 or 6" although case 7
       is implemented above. */
    free(smooth);
    free(coeff);
    class_stop(errmsg,"Non valid radius %d: please choose between 3, 4, 5, 6 or 7\n",radius);
    weigth=0;
    break;
  }

  /* convolve the slice with the coefficient table; the window is clipped at
     the table boundaries */
  for (i=starting_k; i<k_size-radius; i++) {
    smooth[i]=0.;
    jmin = MAX(i-radius,0);
    jmax = MIN(i+radius,k_size-1);
    for (j=jmin; j <= jmax; j++) {
      smooth[i] += coeff[j-jmin]*array[j+k_size*index_eta];
    }
    smooth[i] /= weigth;
  }

  /* copy the smoothed values back into the slice */
  for (i=starting_k; i<k_size-radius; i++)
    array[i+k_size*index_eta] = smooth[i];

  free(smooth);
  free(coeff);

  return _SUCCESS_;

}
/**
 * Flat moving-average smoothing of one column of an array, with a window of
 * half-width 'radius' (the window shrinks near the table edges).
 */
int array_smooth(double * array,
                 int n_columns,
                 int n_lines,
                 int index, /** from 0 to (n_columns-1) */
                 int radius,
                 ErrorMsg errmsg) {

  double * buffer;
  int i,j,first,last;
  double weight;

  buffer = malloc(n_lines*sizeof(double));
  if (buffer == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate smooth",__func__,__LINE__);
    return _FAILURE_;
  }

  /* average each point over the window [i-radius, i+radius], clipped to the
     table boundaries */
  for (i=0; i<n_lines; i++) {
    first = MAX(i-radius,0);
    last = MIN(i+radius,n_lines-1);
    buffer[i] = 0.;
    weight = 0.;
    for (j=first; j <= last; j++) {
      buffer[i] += array[j*n_columns+index];
      weight += 1.;
    }
    buffer[i] /= weight;
  }

  /* copy the smoothed values back into the column */
  for (i=0; i<n_lines; i++)
    array[i*n_columns+index] = buffer[i];

  free(buffer);

  return _SUCCESS_;

}
/**
 * Compute quadrature weights for the trapezoidal integration method, when x
 * is in growing order.
 *
 * @param x Input: Grid points on which f() is known.
 * @param n Input: number of grid points.
 * @param w_trapz Output: Weights of the trapezoidal method.
 * @return the error status
 */
int array_trapezoidal_weights(
                              double * __restrict__ x,
                              int n,
                              double * __restrict__ w_trapz,
                              ErrorMsg errmsg
                              ) {

  int i;

  if (n==1) {
    /* a single point carries no interval: its weight is zero */
    w_trapz[0] = 0.0;
  }
  else if (n>1) {
    /* boundary weights: half of the first and last intervals */
    w_trapz[0] = 0.5*(x[1]-x[0]);
    w_trapz[n-1] = 0.5*(x[n-1]-x[n-2]);
    /* interior weights: half the distance between the two neighbours */
    for (i=1; i<(n-1); i++)
      w_trapz[i] = 0.5*(x[i+1]-x[i-1]);
  }

  return _SUCCESS_;

}
/**
 * Compute quadrature weights for the trapezoidal integration method, when x
 * is in decreasing order.
 *
 * @param x Input: Grid points on which f() is known.
 * @param n Input: number of grid points.
 * @param w_trapz Output: Weights of the trapezoidal method.
 * @return the error status
 */
int array_trapezoidal_mweights(
                               double * __restrict__ x,
                               int n,
                               double * __restrict__ w_trapz,
                               ErrorMsg errmsg
                               ) {

  int i;

  if (n==1) {
    /* NOTE(review): the growing-order variant assigns weight 0.0 for a
       single point, while this one assigns 1.0 — confirm the asymmetry is
       intended by the callers */
    w_trapz[0] = 1.0;
  }
  else if (n>1) {
    /* boundary weights: half of the first and last intervals (x decreasing,
       so the differences are reversed to stay positive) */
    w_trapz[0] = 0.5*(x[0]-x[1]);
    w_trapz[n-1] = 0.5*(x[n-2]-x[n-1]);
    /* interior weights */
    for (i=1; i<(n-1); i++)
      w_trapz[i] = 0.5*(x[i-1]-x[i+1]);
  }

  return _SUCCESS_;

}
/**
 * Compute integral of function using trapezoidal method.
 *
 * @param integrand Input: The function we are integrating.
 * @param n Input: Compute integral on grid [0;n-1].
 * @param w_trapz Input: Weights of the trapezoidal method.
 * @param I Output: The integral.
 * @return the error status
 */
int array_trapezoidal_integral(
                               double * __restrict__ integrand,
                               int n,
                               double * __restrict__ w_trapz,
                               double * __restrict__ I,
                               ErrorMsg errmsg
                               ) {

  int i;
  double acc=0.0;

  /* weighted sum of the integrand samples */
  for (i=0; i<n; i++)
    acc += integrand[i]*w_trapz[i];

  *I = acc;

  return _SUCCESS_;

}
/**
 * Compute convolution integral of product of two functions using trapezoidal method.
 *
 * @param integrand1 Input: Function 1.
 * @param integrand2 Input: Function 2.
 * @param n Input: Compute integral on grid [0;n-1].
 * @param w_trapz Input: Weights of the trapezoidal method.
 * @param I Output: The integral.
 * @return the error status
 */
int array_trapezoidal_convolution(
                                  double * __restrict__ integrand1,
                                  double * __restrict__ integrand2,
                                  int n,
                                  double * __restrict__ w_trapz,
                                  double * __restrict__ I,
                                  ErrorMsg errmsg
                                  ) {

  int i;
  double acc=0.0;

  /* weighted sum of the pointwise products */
  for (i=0; i<n; i++)
    acc += integrand1[i]*integrand2[i]*w_trapz[i];

  *I = acc;

  return _SUCCESS_;

}
|
GB_nvec_nonempty.c | //------------------------------------------------------------------------------
// GB_nvec_nonempty: count the number of non-empty vectors
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// All pending tuples are ignored. If a vector has all zombies it is still
// counted as non-empty.
#include "GB.h"
GB_PUBLIC
int64_t GB_nvec_nonempty        // return # of non-empty vectors
(
    const GrB_Matrix A,         // input matrix to examine
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (A != NULL) ;
    ASSERT (GB_ZOMBIES_OK (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (GB_PENDING_OK (A)) ;

    //--------------------------------------------------------------------------
    // trivial cases: full, bitmap, or no entries at all
    //--------------------------------------------------------------------------

    if (GB_IS_FULL (A) || GB_IS_BITMAP (A))
    {
        // A is full or bitmap; the result depends only on the dimensions
        return ((A->vlen == 0) ? 0 : A->vdim) ;
    }

    if (GB_nnz (A) == 0)
    {
        // A is sparse or hypersparse, with no entries
        return (0) ;
    }

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    int64_t anvec = A->nvec ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (anvec, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // count the non-empty vectors (a vector of all zombies still counts)
    //--------------------------------------------------------------------------

    int64_t n_nonempty = 0 ;
    const int64_t *restrict Ap = A->p ;
    int64_t kk ;

    #pragma omp parallel for num_threads(nthreads) schedule(static) \
        reduction(+:n_nonempty)
    for (kk = 0 ; kk < anvec ; kk++)
    {
        // vector kk is non-empty iff its pointer range is non-degenerate
        n_nonempty += (Ap [kk] < Ap [kk+1]) ? 1 : 0 ;
    }

    ASSERT (n_nonempty >= 0 && n_nonempty <= A->vdim) ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    return (n_nonempty) ;
}
|
GB_binop__bxnor_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bxnor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bxnor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_uint16)
// A*D function (colscale): GB (_AxD__bxnor_uint16)
// D*A function (rowscale): GB (_DxB__bxnor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_uint16)
// C=scalar+B GB (_bind1st__bxnor_uint16)
// C=scalar+B' GB (_bind1st_tran__bxnor_uint16)
// C=A+scalar GB (_bind2nd__bxnor_uint16)
// C=A'+scalar GB (_bind2nd_tran__bxnor_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = ~((aij) ^ (bij))
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_UINT16 || GxB_NO_BXNOR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Generated specialization for z = ~(x ^ y) on uint16_t; the loop lives in
// the included template, driven by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // parallel slicing of B, computed by the caller
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out (see GB_DISABLE)
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxnor_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable in the generated code: the block above already returned
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    // parallel slicing of A, computed by the caller
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxnor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion: unpack the typed alpha/beta scalars that stand in for
        // entries missing from A and B respectively
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxnor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all work is done by the included meta-template
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    // parallel slicing of the mask M, computed by the caller
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxnor_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxnor_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // optional entry bitmap of B (see GBB)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip positions with no entry present (GBB test)
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        // cij = ~(x ^ bij)
        Cx [p] = ~((x) ^ (bij)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxnor_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // optional entry bitmap of A (see GBB)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions with no entry present (GBB test)
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        // cij = ~(aij ^ y)
        Cx [p] = ~((aij) ^ (y)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ~((x) ^ (aij)) ; \
}

GrB_Info GB (_bind1st_tran__bxnor_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // trailing redefinition kept as emitted by the code generator
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ~((aij) ^ (y)) ; \
}

GrB_Info GB (_bind2nd_tran__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
hello_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
void Hello_thread(void);
/*
 * Entry point: parse an optional thread-count argument and launch a parallel
 * region in which every thread prints a greeting.
 *
 * usage: hello_omp <number threads>   (defaults to 4 threads)
 */
int main(int argc, char *argv[])
{
    int thread_count = 4;   /* default when no (valid) argument is given */

    if (argc < 2)
        printf("usage: hello_omp <number threads>\n");
    else
    {
        /* bug fix: the sscanf result was not checked, so a non-numeric or
           non-positive argument was silently accepted; keep the default
           instead */
        if (sscanf(argv[1], "%d", &thread_count) != 1 || thread_count <= 0)
        {
            printf("invalid thread count '%s', using default\n", argv[1]);
            thread_count = 4;
        }
        printf("Will use %d threads for test\n", thread_count);
    }

    #pragma omp parallel num_threads(thread_count)
    Hello_thread();

    return 0;
}
/* Print a greeting identifying this OpenMP thread within its team. */
void Hello_thread(void)
{
    printf("Hello from OMP thread %d of %d\n",
           omp_get_thread_num(), omp_get_num_threads());
}
|
GB_unop__identity_int64_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int64_uint16
// op(A') function: GB_unop_tran__identity_int64_uint16
// C type: int64_t
// A type: uint16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int64_t) Ax [p] for all entries p: apply the identity operator
// with a uint16_t -> int64_t typecast.  Handles both the full (non-bitmap)
// and bitmap representations of A.
GrB_Info GB_unop_apply__identity_int64_uint16
(
int64_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// A is not bitmap: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with no typecast: a single parallel memcpy suffices
GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast uint16_t -> int64_t, and apply the
// identity operator.  The entire transpose loop is supplied by the included
// template GB_unop_transpose.c, parameterized by the GB_* macros defined at
// the top of this file.
GrB_Info GB_unop_tran__identity_int64_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cgels.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgels.c, normal z -> c, Fri Sep 28 17:38:05 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gels
*
* Solves overdetermined or underdetermined linear systems
* involving an m-by-n matrix A using a QR or LQ factorization of A. It
* is assumed that A has full rank. The following options are provided:
*
* # trans = PlasmaNoTrans and m >= n: find the least squares solution of an
* overdetermined system, i.e., solve the least squares problem:
* minimize || B - A*X ||.
*
* # trans = PlasmaNoTrans and m < n: find the minimum norm solution of an
* underdetermined system A * X = B.
*
* Several right-hand side vectors B and solution vectors X can be handled in a
* single call; they are stored as the columns of the m-by-nrhs right-hand side
* matrix B and the n-by-nrhs solution matrix X.
*
*******************************************************************************
*
* @param[in] trans
* - PlasmaNoTrans: the linear system involves A
* (the only supported option for now).
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of columns of the
* matrices B and X. nrhs >= 0.
*
* @param[in,out] pA
* On entry, pointer to the m-by-n matrix A.
* On exit,
* if m >= n, A is overwritten by details of its QR factorization as
* returned by plasma_cgeqrf;
* if m < n, A is overwritten by details of its LQ factorization as
* returned by plasma_cgelqf.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] T
* On exit, auxiliary factorization data.
* Matrix of T is allocated inside this function and needs to be
* destroyed by plasma_desc_destroy.
*
* @param[in,out] pB
* On entry, pointer to the m-by-nrhs matrix B of right-hand side
* vectors, stored columnwise;
* On exit, if return value = 0, B is overwritten by the solution
* vectors, stored columnwise:
* if m >= n, rows 1 to N of B contain the least squares solution
* vectors; the residual sum of squares for the solution in each column
* is given by the sum of squares of the modulus of elements n+1 to m
* in that column;
* if m < n, rows 1 to n of B contain the minimum norm solution
* vectors;
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_cgels
* @sa plasma_cgels
* @sa plasma_dgels
* @sa plasma_sgels
* @sa plasma_cgeqrf
* @sa plasma_cgeqrs
*
******************************************************************************/
int plasma_cgels(plasma_enum_t trans,
                 int m, int n, int nrhs,
                 plasma_complex32_t *pA, int lda,
                 plasma_desc_t *T,
                 plasma_complex32_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (trans != PlasmaNoTrans) {
        plasma_error("only PlasmaNoTrans supported");
        return PlasmaErrorNotSupported;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -4;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -6;
    }
    if (ldb < imax(1, imax(m, n))) {
        plasma_error("illegal value of ldb");
        return -9;
    }

    // quick return - zero out the solution and report success
    if (imin(m, imin(n, nrhs)) == 0) {
        for (int i = 0; i < imax(m, n); i++)
            for (int j = 0; j < nrhs; j++)
                pB[j*ldb+i] = 0.0;
        return PlasmaSuccess;
    }

    // Tune parameters.
    if (plasma->tuning) {
        if (m < n)
            plasma_tune_gelqf(plasma, PlasmaComplexFloat, m, n);
        else
            plasma_tune_geqrf(plasma, PlasmaComplexFloat, m, n);
    }

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;
    plasma_enum_t householder_mode = plasma->householder_mode;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    // B is tall enough to hold the solution X as well (max(m, n) rows).
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        imax(m, n), nrhs, 0, 0, imax(m, n),
                                        nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Prepare descriptor T.
    retval = plasma_descT_create(A, ib, householder_mode, T);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_descT_create() failed");
        // Bug fix: release the tile matrices created above (were leaked).
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = nb + ib*nb;  // geqrt/gelqt: tau + work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexFloat);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Bug fix: release T and the tile matrices created above (were leaked).
        plasma_desc_destroy(T);
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_cgels(PlasmaNoTrans,
                         A, *T,
                         B, work,
                         &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(A, pA, lda, &sequence, &request);
        plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_gels
*
* Solves overdetermined or underdetermined linear
* system of equations using the tile QR or the tile LQ factorization.
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] trans
* - PlasmaNoTrans: the linear system involves A
* (the only supported option for now).
*
* @param[in,out] A
* Descriptor of matrix A stored in the tile layout.
* On exit,
* if m >= n, A is overwritten by details of its QR factorization
* as returned by plasma_cgeqrf;
* if m < n, A is overwritten by details of its LQ factorization
* as returned by plasma_cgelqf.
*
* @param[out] T
* Descriptor of matrix T.
* Auxiliary factorization data, computed by
* plasma_cgeqrf or plasma_cgelqf.
*
* @param[in,out] B
* Descriptor of matrix B.
* On entry, right-hand side matrix B in the tile layout.
* On exit, solution matrix X in the tile layout.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For QR/LQ factorizations used in GELS, it contains preallocated
* space for tau and work arrays.
* Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_cgels
* @sa plasma_omp_cgels
* @sa plasma_omp_dgels
* @sa plasma_omp_sgels
*
******************************************************************************/
void plasma_omp_cgels(plasma_enum_t trans,
                      plasma_desc_t A, plasma_desc_t T,
                      plasma_desc_t B, plasma_workspace_t work,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Bug fix: validate sequence and request before anything else.  Every
    // other error path reports through plasma_request_fail(sequence, request),
    // which dereferences both pointers; the original code only checked them
    // for NULL after already passing them to plasma_request_fail.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check the remaining input arguments.
    if (trans != PlasmaNoTrans) {
        plasma_error("only PlasmaNoTrans supported");
        plasma_request_fail(sequence, request, PlasmaErrorNotSupported);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid descriptor T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid descriptor B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return - an empty system has the all-zero solution
    if (A.m == 0 || A.n == 0 || B.n == 0) {
        // Zero matrix B.
        plasma_pclaset(PlasmaGeneral, 0.0, 0.0, B, sequence, request);
        return;
    }

    // Hoist the repeated householder-mode test.
    int use_tree = (plasma->householder_mode == PlasmaTreeHouseholder);

    //===============================
    // Solve using QR factorization.
    //===============================
    if (A.m >= A.n) {
        // Factor A = Q*R.
        if (use_tree)
            plasma_pcgeqrf_tree(A, T, work, sequence, request);
        else
            plasma_pcgeqrf(A, T, work, sequence, request);

        // Compute B := Q^H * B.
        if (use_tree)
            plasma_pcunmqr_tree(PlasmaLeft, Plasma_ConjTrans,
                                A, T, B,
                                work, sequence, request);
        else
            plasma_pcunmqr(PlasmaLeft, Plasma_ConjTrans,
                           A, T, B,
                           work, sequence, request);

        // Solve the triangular system R * X = Q^H * B.
        plasma_pctrsm(PlasmaLeft, PlasmaUpper,
                      PlasmaNoTrans, PlasmaNonUnit,
                      1.0,
                      plasma_desc_view(A, 0, 0, A.n, A.n),
                      plasma_desc_view(B, 0, 0, A.n, B.n),
                      sequence, request);
    }
    //===============================
    // Solve using LQ factorization.
    //===============================
    else {
        // Factor A = L*Q.
        if (use_tree)
            plasma_pcgelqf_tree(A, T, work, sequence, request);
        else
            plasma_pcgelqf(A, T, work, sequence, request);

        // Zero the trailing block of the right-hand-side matrix.
        // B has less rows than X.
        plasma_pclaset(PlasmaGeneral, 0.0, 0.0,
                       plasma_desc_view(B, A.m, 0, A.n-A.m, B.n),
                       sequence, request);

        // Solve L * Y = B.
        plasma_pctrsm(
            PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaNonUnit,
            1.0, plasma_desc_view(A, 0, 0, A.m, A.m),
            plasma_desc_view(B, 0, 0, A.m, B.n),
            sequence, request);

        // Find X = Q^H * Y.
        if (use_tree)
            plasma_pcunmlq_tree(PlasmaLeft, Plasma_ConjTrans,
                                A, T, B,
                                work, sequence, request);
        else
            plasma_pcunmlq(PlasmaLeft, Plasma_ConjTrans,
                           A, T, B,
                           work, sequence, request);
    }
}
|
instruments.h | /*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2019 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file instruments.h
* \brief Measurement functions
*/
#ifndef INSTRUMENTS_H_
#define INSTRUMENTS_H_
namespace qpp {
/**
* \brief Generalized inner product
*
* \param phi Column vector Eigen expression
* \param psi Column vector Eigen expression
* \param subsys Subsystem indexes over which \a phi is defined
* \param dims Dimensions of the multi-partite system
* \return Inner product \f$\langle \phi_{subsys}|\psi\rangle\f$, as a scalar
* or column vector over the remaining Hilbert space
*/
template <typename Derived>
dyn_col_vect<typename Derived::Scalar>
ip(const Eigen::MatrixBase<Derived>& phi, const Eigen::MatrixBase<Derived>& psi,
const std::vector<idx>& subsys, const std::vector<idx>& dims) {
const dyn_col_vect<typename Derived::Scalar>& rphi = phi.derived();
const dyn_col_vect<typename Derived::Scalar>& rpsi = psi.derived();
// EXCEPTION CHECKS
// check zero-size
if (!internal::check_nonzero_size(rphi))
throw exception::ZeroSize("qpp::ip()");
// check zero-size
if (!internal::check_nonzero_size(rpsi))
throw exception::ZeroSize("qpp::ip()");
// check column vector
if (!internal::check_cvector(rphi))
throw exception::MatrixNotCvector("qpp::ip()");
// check column vector
if (!internal::check_cvector(rpsi))
throw exception::MatrixNotCvector("qpp::ip()");
// check that dims is a valid dimension vector
if (!internal::check_dims(dims))
throw exception::DimsInvalid("qpp::ip()");
// check that subsys are valid
if (!internal::check_subsys_match_dims(subsys, dims))
throw exception::SubsysMismatchDims("qpp::ip()");
// check that dims match psi column vector
if (!internal::check_dims_match_cvect(dims, rpsi))
throw exception::DimsMismatchCvector("qpp::ip()");
// check that subsys match phi column vector
std::vector<idx> subsys_dims(subsys.size());
for (idx i = 0; i < subsys.size(); ++i)
subsys_dims[i] = dims[subsys[i]];
if (!internal::check_dims_match_cvect(subsys_dims, rphi))
throw exception::DimsMismatchCvector("qpp::ip()");
// END EXCEPTION CHECKS
// total dimension of the measured subsystems and of their complement
idx Dsubsys = prod(std::begin(subsys_dims), std::end(subsys_dims));
idx D = static_cast<idx>(rpsi.rows());
idx Dsubsys_bar = D / Dsubsys;
idx n = dims.size();
idx n_subsys = subsys.size();
idx n_subsys_bar = n - n_subsys;
// C-style working arrays (maxn is the library's max number of subsystems)
idx Cdims[maxn];
idx Csubsys[maxn];
idx Cdimssubsys[maxn];
idx Csubsys_bar[maxn];
idx Cdimssubsys_bar[maxn];
// indexes of the subsystems NOT in subsys
std::vector<idx> subsys_bar = complement(subsys, n);
std::copy(std::begin(subsys_bar), std::end(subsys_bar),
std::begin(Csubsys_bar));
for (idx i = 0; i < n; ++i) {
Cdims[i] = dims[i];
}
for (idx i = 0; i < n_subsys; ++i) {
Csubsys[i] = subsys[i];
Cdimssubsys[i] = dims[subsys[i]];
}
for (idx i = 0; i < n_subsys_bar; ++i) {
Cdimssubsys_bar[i] = dims[subsys_bar[i]];
}
// computes component b of the result: sum over the subsys indexes of
// conj(phi(a)) * psi(row index combining a with complement index b)
auto worker = [&](idx b) noexcept->typename Derived::Scalar {
idx Cmidxrow[maxn];
idx Cmidxrowsubsys[maxn];
idx Cmidxcolsubsys_bar[maxn];
/* get the col multi-indexes of the complement */
internal::n2multiidx(b, n_subsys_bar, Cdimssubsys_bar,
Cmidxcolsubsys_bar);
/* write it in the global row multi-index */
for (idx k = 0; k < n_subsys_bar; ++k) {
Cmidxrow[Csubsys_bar[k]] = Cmidxcolsubsys_bar[k];
}
typename Derived::Scalar result = 0;
for (idx a = 0; a < Dsubsys; ++a) {
/* get the row multi-indexes of the subsys */
internal::n2multiidx(a, n_subsys, Cdimssubsys, Cmidxrowsubsys);
/* write it in the global row multi-index */
for (idx k = 0; k < n_subsys; ++k) {
Cmidxrow[Csubsys[k]] = Cmidxrowsubsys[k];
}
// compute the row index
idx i = internal::multiidx2n(Cmidxrow, n, Cdims);
result += std::conj(rphi(a)) * rpsi(i);
}
return result;
}; /* end worker */
// one independent worker call per component of the result
dyn_col_vect<typename Derived::Scalar> result(Dsubsys_bar);
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
for (idx m = 0; m < Dsubsys_bar; ++m)
result(m) = worker(m);
return result;
}
/**
* \brief Generalized inner product
*
* \param phi Column vector Eigen expression
* \param psi Column vector Eigen expression
* \param subsys Subsystem indexes over which \a phi is defined
* \param d Subsystem dimensions
* \return Inner product \f$\langle \phi_{subsys}|\psi\rangle\f$, as a scalar
* or column vector over the remaining Hilbert space
*/
template <typename Derived>
dyn_col_vect<typename Derived::Scalar>
ip(const Eigen::MatrixBase<Derived>& phi, const Eigen::MatrixBase<Derived>& psi,
   const std::vector<idx>& subsys, idx d = 2) {
    // Bug fix: the original also bound phi.derived() to an unused local
    // reference, which can materialize a needless temporary copy when the
    // expression type differs from dyn_col_vect; all phi validation is done
    // by the delegated dims-based overload below.
    const dyn_col_vect<typename Derived::Scalar>& rpsi = psi.derived();

    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rpsi))
        throw exception::ZeroSize("qpp::ip()");
    // check valid dims
    if (d < 2)
        throw exception::DimsInvalid("qpp::ip()");
    // END EXCEPTION CHECKS

    // expand the uniform local dimension d into an explicit dims vector and
    // delegate (the delegated overload performs the remaining checks)
    idx n = internal::get_num_subsys(static_cast<idx>(rpsi.rows()), d);
    std::vector<idx> dims(n, d); // local dimensions vector
    return ip(phi, psi, subsys, dims);
}
// full measurements
/**
* \brief Measures the state vector or density operator \a A using the set of
* Kraus operators \a Ks
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \return Tuple of: 1. Result of the measurement, 2.
* Vector of outcome probabilities, and 3. Vector of post-measurement
* normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const std::vector<cmat>& Ks) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS
    // state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure()");
    // need at least one Kraus operator; all must be square, match the state
    // dimension, and have equal sizes
    if (Ks.size() == 0)
        throw exception::ZeroSize("qpp::measure()");
    if (!internal::check_square_mat(Ks[0]))
        throw exception::MatrixNotSquare("qpp::measure()");
    if (Ks[0].rows() != rA.rows())
        throw exception::DimsMismatchMatrix("qpp::measure()");
    for (auto&& elem : Ks)
        if (elem.rows() != Ks[0].rows() || elem.cols() != Ks[0].rows())
            throw exception::DimsNotEqual("qpp::measure()");
    // END EXCEPTION CHECKS

    const idx num_ops = Ks.size();
    std::vector<double> probs(num_ops);     // outcome probabilities
    std::vector<cmat> post_states(num_ops); // normalized post-measurement states

    if (internal::check_square_mat(rA)) {
        //************ density matrix ************//
        for (idx k = 0; k < num_ops; ++k) {
            post_states[k] = cmat::Zero(rA.rows(), rA.rows());
            cmat unnormalized = Ks[k] * rA * adjoint(Ks[k]);
            probs[k] = std::abs(trace(unnormalized)); // probability
            if (probs[k] > 0)
                post_states[k] = unnormalized / probs[k]; // normalized
        }
    } else if (internal::check_cvector(rA)) {
        //************ ket ************//
        for (idx k = 0; k < num_ops; ++k) {
            post_states[k] = ket::Zero(rA.rows());
            ket unnormalized = Ks[k] * rA;
            probs[k] = std::pow(norm(unnormalized), 2); // probability
            if (probs[k] > 0)
                post_states[k] = unnormalized / std::sqrt(probs[k]); // normalized
        }
    } else {
        throw exception::MatrixNotSquareNorCvector("qpp::measure()");
    }

    // sample the measurement outcome from the probability distribution
    std::discrete_distribution<idx> dd(std::begin(probs), std::end(probs));
    auto& gen =
#ifdef NO_THREAD_LOCAL_
        RandomDevices::get_instance().get_prng();
#else
        RandomDevices::get_thread_local_instance().get_prng();
#endif
    idx result = dd(gen);

    return std::make_tuple(result, probs, post_states);
}
// std::initializer_list overload, avoids ambiguity for 2-element lists, see
// http://stackoverflow.com
// /questions/26750039/ambiguity-when-using-initializer-list-as-parameter
/**
* \brief Measures the state vector or density matrix \a A using the set of
* Kraus operators \a Ks
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \return Tuple of: 1. Result of the measurement, 2.
* Vector of outcome probabilities, and 3. Vector of post-measurement
* normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A,
        const std::initializer_list<cmat>& Ks) {
    // Materialize the initializer list and forward to the vector overload.
    std::vector<cmat> kraus_ops(Ks);
    return measure(A, kraus_ops);
}
/**
* \brief Measures the state vector or density matrix \a A in the orthonormal
* basis specified by the unitary matrix \a U
*
* \param A Eigen expression
* \param U Unitary matrix whose columns represent the measurement basis vectors
* \return Tuple of: 1. Result of the measurement, 2.
* Vector of outcome probabilities, and 3. Vector of post-measurement
* normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const cmat& U) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS
    // state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure()");
    // the basis matrix U must be non-empty, square, and match the state
    if (!internal::check_nonzero_size(U))
        throw exception::ZeroSize("qpp::measure()");
    if (!internal::check_square_mat(U))
        throw exception::MatrixNotSquare("qpp::measure()");
    if (U.rows() != rA.rows())
        throw exception::DimsMismatchMatrix("qpp::measure()");
    // END EXCEPTION CHECKS

    // build the rank-1 projectors |u_i><u_i| from the columns of U and
    // delegate to the Kraus-operator overload
    const idx basis_size = static_cast<idx>(U.rows());
    std::vector<cmat> projectors(basis_size);
    for (idx col = 0; col < basis_size; ++col)
        projectors[col] = U.col(col) * adjoint(U.col(col));

    return measure(rA, projectors);
}
// partial measurements
/**
* \brief Measures the part \a subsys of the multi-partite state vector or
* density matrix \a A using the set of Kraus operators \a Ks
* \see qpp::measure_seq()
*
* \note The dimension of all \a Ks must match the dimension of \a target.
* The measurement is destructive, i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \param target Subsystem indexes that are measured
* \param dims Dimensions of the multi-partite system
* \return Tuple of: 1. Result of the measurement, 2.
* Vector of outcome probabilities, and 3. Vector of post-measurement
* normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const std::vector<cmat>& Ks,
const std::vector<idx>& target, const std::vector<idx>& dims) {
const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
// EXCEPTION CHECKS
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::measure()");
// check that dimension is valid
if (!internal::check_dims(dims))
throw exception::DimsInvalid("qpp::measure()");
// check that target is valid w.r.t. dims
if (!internal::check_subsys_match_dims(target, dims))
throw exception::SubsysMismatchDims("qpp::measure()");
// dimensions of the measured subsystems
std::vector<idx> subsys_dims(target.size());
for (idx i = 0; i < target.size(); ++i)
subsys_dims[i] = dims[target[i]];
// D: total dimension; Dsubsys: measured part; Dsubsys_bar: remaining part
idx D = prod(std::begin(dims), std::end(dims));
idx Dsubsys = prod(std::begin(subsys_dims), std::end(subsys_dims));
idx Dsubsys_bar = D / Dsubsys;
// check the Kraus operators
if (Ks.size() == 0)
throw exception::ZeroSize("qpp::measure()");
if (!internal::check_square_mat(Ks[0]))
throw exception::MatrixNotSquare("qpp::measure()");
if (Dsubsys != static_cast<idx>(Ks[0].rows()))
throw exception::DimsMismatchMatrix("qpp::measure()");
for (auto&& elem : Ks)
if (elem.rows() != Ks[0].rows() || elem.cols() != Ks[0].rows())
throw exception::DimsNotEqual("qpp::measure()");
// END EXCEPTION CHECKS
// probabilities
std::vector<double> prob(Ks.size());
// resulting states (the measured subsystems are traced away, so each
// output state lives in the Dsubsys_bar-dimensional remaining space)
std::vector<cmat> outstates(Ks.size(),
cmat::Zero(Dsubsys_bar, Dsubsys_bar));
//************ density matrix ************//
if (internal::check_square_mat(rA)) // square matrix
{
// check that dims match rho matrix
if (!internal::check_dims_match_mat(dims, rA))
throw exception::DimsMismatchMatrix("qpp::measure()");
for (idx i = 0; i < Ks.size(); ++i) {
// apply K_i on the target subsystems, then trace them out
cmat tmp = apply(rA, Ks[i], target, dims);
tmp = ptrace(tmp, target, dims);
prob[i] = std::abs(trace(tmp)); // probability
if (prob[i] > 0) {
// normalized output state
// corresponding to measurement result i
outstates[i] = tmp / prob[i];
}
}
}
//************ ket ************//
else if (internal::check_cvector(rA)) // column vector
{
// check that dims match psi column vector
if (!internal::check_dims_match_cvect(dims, rA))
throw exception::DimsMismatchCvector("qpp::measure()");
for (idx i = 0; i < Ks.size(); ++i) {
// apply K_i on the target subsystems of the ket
ket tmp = apply(rA, Ks[i], target, dims);
prob[i] = std::pow(norm(tmp), 2);
if (prob[i] > 0) {
// normalized output state
// corresponding to measurement result i
tmp /= std::sqrt(prob[i]);
outstates[i] = ptrace(tmp, target, dims);
}
}
} else
throw exception::MatrixNotSquareNorCvector("qpp::measure()");
// sample from the probability distribution
std::discrete_distribution<idx> dd(std::begin(prob), std::end(prob));
auto& gen =
#ifdef NO_THREAD_LOCAL_
RandomDevices::get_instance().get_prng();
#else
RandomDevices::get_thread_local_instance().get_prng();
#endif
idx result = dd(gen);
return std::make_tuple(result, prob, outstates);
}
// std::initializer_list overload, avoids ambiguity for 2-element lists, see
// http://stackoverflow.com
// /questions/26750039/ambiguity-when-using-initializer-list-as-parameter
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A using the set of Kraus operators \a Ks
* \see qpp::measure_seq()
*
* \note The dimension of all \a Ks must match the dimension of \a target.
* The measurement is destructive, i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \param target Subsystem indexes that are measured
* \param dims Dimensions of the multi-partite system
* \return Tuple of: 1. Result of the measurement, 2.
* Vector of outcome probabilities, and 3. Vector of post-measurement
* normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A,
        const std::initializer_list<cmat>& Ks, const std::vector<idx>& target,
        const std::vector<idx>& dims) {
    // Materialize the initializer list and dispatch to the primary overload.
    std::vector<cmat> kraus_ops(Ks);
    return measure(A, kraus_ops, target, dims);
}
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A using the set of Kraus operators \a Ks
* \see qpp::measure_seq()
*
* \note The dimension of all \a Ks must match the dimension of \a target.
* The measurement is destructive, i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \param target Subsystem indexes that are measured
* \param d Subsystem dimensions
* \return Tuple of: 1. Result of the measurement, 2.
* Vector of outcome probabilities, and 3. Vector of post-measurement
* normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const std::vector<cmat>& Ks,
        const std::vector<idx>& target, idx d = 2) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS
    // state must be non-empty
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure()");
    // the uniform local dimension must be at least 2
    if (d < 2)
        throw exception::DimsInvalid("qpp::measure()");
    // END EXCEPTION CHECKS

    // expand the uniform local dimension d into an explicit dims vector and
    // delegate to the dims-based overload
    const idx num_subsys = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
    std::vector<idx> dims(num_subsys, d);
    return measure(rA, Ks, target, dims);
}
// std::initializer_list overload, avoids ambiguity for 2-element lists, see
// http://stackoverflow.com
// /questions/26750039/ambiguity-when-using-initializer-list-as-parameter
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A using the set of Kraus operators \a Ks
* \see qpp::measure_seq()
*
* \note The dimension of all \a Ks must match the dimension of \a target.
* The measurement is destructive, i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \param target Subsystem indexes that are measured
* \param d Subsystem dimensions
* \return Tuple of: 1. Result of the measurement, 2.
* Vector of outcome probabilities, and 3. Vector of post-measurement
* normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A,
        const std::initializer_list<cmat>& Ks, const std::vector<idx>& target,
        idx d = 2) {
    // Materialize the initializer list and forward to the vector overload.
    std::vector<cmat> kraus_ops(Ks);
    return measure(A, kraus_ops, target, d);
}
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A in the orthonormal basis or rank-1 projectors specified
* by the columns of the matrix \a V
* \see qpp::measure_seq()
*
* \note The dimension of \a V must match the dimension of \a target.
* The measurement is destructive, i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param V Matrix whose columns represent the measurement basis vectors or the
* bra parts of the rank-1 projectors
* \param target Subsystem indexes that are measured
* \param dims Dimensions of the multi-partite system
* \return Tuple of: 1. Result of the measurement, 2.
* Vector of outcome probabilities, and 3. Vector of post-measurement
* normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const cmat& V,
        const std::vector<idx>& target, const std::vector<idx>& dims) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure()");
    // check that dimension is valid
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::measure()");
    // check that target is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::measure()");

    // dimensions of the measured subsystems, in the order given by target
    std::vector<idx> subsys_dims(target.size());
    for (idx i = 0; i < target.size(); ++i)
        subsys_dims[i] = dims[target[i]];
    // total dimension of the measured part; must equal the row count of V
    idx Dsubsys = prod(std::begin(subsys_dims), std::end(subsys_dims));

    // check the matrix V
    if (!internal::check_nonzero_size(V))
        throw exception::ZeroSize("qpp::measure()");
    if (Dsubsys != static_cast<idx>(V.rows()))
        throw exception::DimsMismatchMatrix("qpp::measure()");
    // END EXCEPTION CHECKS

    // number of basis elements or number of rank-1 projectors
    idx M = static_cast<idx>(V.cols());

    //************ ket ************//
    if (internal::check_cvector(rA)) {
        // NOTE(review): if Derived is not ket, this conversion materializes a
        // temporary whose lifetime is extended by the const reference
        const ket& rpsi = A.derived();
        // check that dims match state vector
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::measure()");

        std::vector<double> prob(M);    // probabilities
        std::vector<cmat> outstates(M); // resulting states

// compute the un-normalized post-measurement states in parallel;
// ip() presumably evaluates <V.col(i)|psi> on the target subsystems
// (TODO confirm against the definition of qpp::ip())
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
        for (idx i = 0; i < M; ++i)
            outstates[i] =
                ip(static_cast<const ket&>(V.col(i)), rpsi, target, dims);

        // outcome probability = squared norm of the un-normalized state
        for (idx i = 0; i < M; ++i) {
            double tmp = norm(outstates[i]);
            prob[i] = tmp * tmp;
            if (prob[i] > 0) {
                // normalized output state
                // corresponding to measurement result m
                outstates[i] /= tmp;
            }
        }

        // sample the measurement outcome from the probability distribution
        std::discrete_distribution<idx> dd(std::begin(prob), std::end(prob));
        auto& gen =
#ifdef NO_THREAD_LOCAL_
            RandomDevices::get_instance().get_prng();
#else
            RandomDevices::get_thread_local_instance().get_prng();
#endif
        idx result = dd(gen);

        return std::make_tuple(result, prob, outstates);
    }
    //************ density matrix ************//
    else if (internal::check_square_mat(rA)) {
        // check that dims match rho matrix
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::measure()");

        // build the rank-1 Kraus operators K_i = |v_i><v_i| from the columns
        // of V and delegate to the Kraus-operator overload
        std::vector<cmat> Ks(M);
        for (idx i = 0; i < M; ++i)
            Ks[i] = V.col(i) * adjoint(V.col(i));

        return measure(rA, Ks, target, dims);
    }
    //************ Exception: not ket nor density matrix ************//
    throw exception::MatrixNotSquareNorCvector("qpp::measure()");
}
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A in the orthonormal basis or rank-1 projectors specified
* by the columns of the matrix \a V
* \see qpp::measure_seq()
*
* \note The dimension of \a V must match the dimension of \a target.
* The measurement is destructive, i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param V Matrix whose columns represent the measurement basis vectors or the
* bra parts of the rank-1 projectors
* \param target Subsystem indexes that are measured
* \param d Subsystem dimensions
* \return Tuple of: 1. Result of the measurement, 2.
* Vector of outcome probabilities, and 3. Vector of post-measurement
* normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const cmat& V,
        const std::vector<idx>& target, idx d = 2) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS
    // the expression must contain at least one element
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure()");
    // local dimensions smaller than 2 are meaningless
    if (d < 2)
        throw exception::DimsInvalid("qpp::measure()");
    // END EXCEPTION CHECKS

    // every subsystem has the same dimension d; build the local-dimensions
    // vector and delegate to the general-dimensions overload
    idx num_subsys = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
    std::vector<idx> local_dims(num_subsys, d);

    return measure(rA, V, target, local_dims);
}
/**
* \brief Sequentially measures the part \a target of the multi-partite state
* vector or density matrix \a A in the computational basis
* \see qpp::measure()
*
* \param A Eigen expression
* \param target Subsystem indexes that are measured
* \param dims Dimensions of the multi-partite system
* \return Tuple of: 1. Vector of outcome results of the
* measurement (ordered in increasing order with respect to \a target, i.e.
* first measurement result corresponds to the subsystem with the smallest
* index), 2. Outcome probability, and 3. Post-measurement normalized state
*/
template <typename Derived>
std::tuple<std::vector<idx>, double, cmat>
measure_seq(const Eigen::MatrixBase<Derived>& A, std::vector<idx> target,
            std::vector<idx> dims) {
    //    typename std::remove_const<
    //        typename Eigen::MatrixBase<Derived>::EvalReturnType
    //    >::type rA = A.derived();

    // working copy of the state: it is replaced by the (smaller)
    // post-measurement state after each single-subsystem measurement
    dyn_mat<typename Derived::Scalar> rA = A.derived();

    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure_seq()");
    // check that dimension is valid
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::measure_seq()");
    // check square matrix or column vector
    if (internal::check_square_mat(rA)) {
        // check that dims match rho matrix
        if (!internal::check_dims_match_mat(dims, rA))
            throw exception::DimsMismatchMatrix("qpp::measure_seq()");
    } else if (internal::check_cvector(rA)) {
        // check that dims match psi column vector
        if (!internal::check_dims_match_cvect(dims, rA))
            throw exception::DimsMismatchCvector("qpp::measure_seq()");
    } else
        throw exception::MatrixNotSquareNorCvector("qpp::measure_seq()");
    // check that target is valid w.r.t. dims
    if (!internal::check_subsys_match_dims(target, dims))
        throw exception::SubsysMismatchDims("qpp::measure_seq()");
    // END EXCEPTION CHECKS

    std::vector<idx> result;
    double prob = 1;

    // sort target in decreasing order,
    // the order of measurements does not matter
    std::sort(std::begin(target), std::end(target), std::greater<idx>{});

    //************ density matrix or column vector ************//
    while (target.size() > 0) {
        // measure the largest remaining target subsystem; the identity's
        // columns are the computational-basis vectors, so this is a
        // computational-basis measurement of subsystem target[0]
        auto tmp = measure(rA, Gates::get_instance().Id(dims[target[0]]),
                           {target[0]}, dims);
        result.emplace_back(std::get<0>(tmp));
        // accumulate the joint probability of the observed outcomes
        prob *= std::get<1>(tmp)[std::get<0>(tmp)];
        // continue from the normalized post-measurement state
        rA = std::get<2>(tmp)[std::get<0>(tmp)];

        // remove the subsystem; because target is sorted in decreasing
        // order, erasing dims[target[0]] keeps the remaining (smaller)
        // target indexes valid
        dims.erase(std::next(std::begin(dims), target[0]));
        target.erase(std::begin(target));
    }
    // order result in increasing order with respect to target
    std::reverse(std::begin(result), std::end(result));

    return std::make_tuple(result, prob, rA);
}
/**
* \brief Sequentially measures the part \a target of the multi-partite state
* vector or density matrix \a A in the computational basis
* \see qpp::measure()
*
* \param A Eigen expression
* \param target Subsystem indexes that are measured
* \param d Subsystem dimensions
* \return Tuple of: 1. Vector of outcome results of the
* measurement (ordered in increasing order with respect to \a target, i.e.
* first measurement result corresponds to the subsystem with the smallest
* index), 2. Outcome probability, and 3. Post-measurement normalized state
*/
template <typename Derived>
std::tuple<std::vector<idx>, double, cmat>
measure_seq(const Eigen::MatrixBase<Derived>& A, std::vector<idx> target,
            idx d = 2) {
    const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();

    // EXCEPTION CHECKS
    // the expression must contain at least one element
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::measure_seq()");
    // local dimensions smaller than 2 are meaningless
    if (d < 2)
        throw exception::DimsInvalid("qpp::measure_seq()");
    // END EXCEPTION CHECKS

    // uniform local dimensions, then forward to the general-dims overload
    idx num_subsys = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
    std::vector<idx> local_dims(num_subsys, d);

    return measure_seq(rA, target, local_dims);
}
} /* namespace qpp */
#endif /* INSTRUMENTS_H_ */
|
GB_subassign_09.c | //------------------------------------------------------------------------------
// GB_subassign_09: C(I,J)<M,repl> = scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 09: C(I,J)<M,repl> = scalar ; using S
// M: present
// Mask_comp: false
// C_replace: true
// accum: NULL
// A: scalar
// S: constructed
// C: not bitmap or full
#include "GB_unused.h"
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_09
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT_IF_JUMBLED (M) ;

    GB_GET_C ;      // C must not be bitmap
    GB_GET_MASK ;
    GB_GET_SCALAR ;
    GB_GET_S ;
    // Method 09 has no accumulator; the GB_* action macros below read accum
    GrB_BinaryOp accum = NULL ;

    //--------------------------------------------------------------------------
    // Method 09: C(I,J)<M,repl> = scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal.  All entries in M+S must be examined.  All entries in S
    // are modified:  if M(i,j)=1 then S(i,j) is used to write to the
    // corresponding entry in C.  If M(i,j) is not present, or zero, then the
    // entry in C is cleared (because of C_replace).  If S(i,j) is not present,
    // and M(i,j)=1, then the scalar is inserted into C.  The only case that
    // can be skipped is if neither S nor M is present.  As a result, this
    // method need not traverse all of IxJ.  It can limit its traversal to the
    // pattern of M+S.

    // Method 09 and Method 11 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: M+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
    //--------------------------------------------------------------------------

    if (M_is_bitmap)
    {
        // all of IxJ must be examined
        GB_SUBASSIGN_IXJ_SLICE ;
    }
    else
    {
        // traverse all M+S
        GB_SUBASSIGN_TWO_SLICE (M, S) ;
    }

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    if (M_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase1: M is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iM_start, iM_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iM_start:iM_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
                // M is bitmap, so M(:,j) starts at position j*Mvlen
                int64_t pM_start = j * Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
                //--------------------------------------------------------------

                for (int64_t iM = iM_start ; iM < iM_end ; iM++)
                {
                    int64_t pM = pM_start + iM ;
                    // Sfound: does S have an entry at row iM of column j?
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
                    // mij: is the mask entry M(iM,j) present and true?
                    bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
                    if (Sfound && !mij)
                    {
                        // S (i,j) is present but M (i,j) is false
                        // ----[C A 0] or [X A 0]-------------------------------
                        // [X A 0]: action: ( X ): still a zombie
                        // [C A 0]: C_repl: action: ( delete ): becomes zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                        GB_NEXT (S) ;
                    }
                    else if (!Sfound && mij)
                    {
                        // S (i,j) is not present, M (i,j) is true
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                    }
                    else if (Sfound && mij)
                    {
                        // S (i,j) present and M (i,j) is true
                        GB_C_S_LOOKUP ;
                        // ----[C A 1] or [X A 1]-------------------------------
                        // [C A 1]: action: ( =A ): copy A, no accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_noaccum_C_A_1_scalar ;
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE1_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase1: M is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE1 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get S(:,j) and M(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and M(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and M (:,j) have entries
                while (pS < pS_end && pM < pM_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iM = GBI (Mi, pM, Mvlen) ;

                    if (iS < iM)
                    {
                        // S (i,j) is present but M (i,j) is not
                        // ----[C A 0] or [X A 0]-------------------------------
                        // [X A 0]: action: ( X ): still a zombie
                        // [C A 0]: C_repl: action: ( delete ): becomes zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                        GB_NEXT (S) ;
                    }
                    else if (iM < iS)
                    {
                        // S (i,j) is not present, M (i,j) is present
                        if (GB_mcast (Mx, pM, msize))
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            task_pending++ ;
                        }
                        GB_NEXT (M) ;
                    }
                    else
                    {
                        // both S (i,j) and M (i,j) present
                        GB_C_S_LOOKUP ;
                        if (GB_mcast (Mx, pM, msize))
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =A ): copy A, no accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_noaccum_C_A_1_scalar ;
                        }
                        else
                        {
                            // ----[C A 0] or [X A 0]---------------------------
                            // [X A 0]: action: ( X ): still a zombie
                            // [C A 0]: C_repl: action: ( delete ): now zombie
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                        GB_NEXT (M) ;
                    }
                }

                // while list S (:,j) has entries.  List M (:,j) exhausted.
                while (pS < pS_end)
                {
                    // S (i,j) is present but M (i,j) is not
                    // ----[C A 0] or [X A 0]-----------------------------------
                    // [X A 0]: action: ( X ): still a zombie
                    // [C A 0]: C_repl: action: ( delete ): becomes zombie
                    GB_C_S_LOOKUP ;
                    GB_DELETE_ENTRY ;
                    GB_NEXT (S) ;
                }

                // while list M (:,j) has entries.  List S (:,j) exhausted.
                while (pM < pM_end)
                {
                    // S (i,j) is not present, M (i,j) is present
                    if (GB_mcast (Mx, pM, msize))
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                    }
                    GB_NEXT (M) ;
                }
            }
            GB_PHASE1_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    if (M_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase2: M is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iM_start, iM_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iM_start:iM_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
                int64_t pM_start = j * Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                for (int64_t iM = iM_start ; iM < iM_end ; iM++)
                {
                    int64_t pM = pM_start + iM ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
                    bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
                    if (!Sfound && mij)
                    {
                        // S (i,j) is not present, M (i,j) is true
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                        GB_PENDING_INSERT (scalar) ;
                    }
                    else if (Sfound)
                    {
                        // S (i,j) present
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE2_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase2: M is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE2 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get S(:,j) and M(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and M(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and M (:,j) have entries
                while (pS < pS_end && pM < pM_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iM = GBI (Mi, pM, Mvlen) ;

                    if (iS < iM)
                    {
                        // S (i,j) is present but M (i,j) is not
                        // (already handled in phase 1; nothing pending here)
                        GB_NEXT (S) ;
                    }
                    else if (iM < iS)
                    {
                        // S (i,j) is not present, M (i,j) is present
                        if (GB_mcast (Mx, pM, msize))
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                            GB_PENDING_INSERT (scalar) ;
                        }
                        GB_NEXT (M) ;
                    }
                    else
                    {
                        // both S (i,j) and M (i,j) present
                        GB_NEXT (S) ;
                        GB_NEXT (M) ;
                    }
                }

                // while list M (:,j) has entries.  List S (:,j) exhausted.
                while (pM < pM_end)
                {
                    // S (i,j) is not present, M (i,j) is present
                    if (GB_mcast (Mx, pM, msize))
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        int64_t iM = GBI (Mi, pM, Mvlen) ;
                        int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                        GB_PENDING_INSERT (scalar) ;
                    }
                    GB_NEXT (M) ;
                }
            }
            GB_PHASE2_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
|
XT_ICD_update.c | /* ============================================================================
* Copyright (c) 2013 K. Aditya Mohan (Purdue University)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* Neither the name of K. Aditya Mohan, Purdue
* University, nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
#include "XT_Constants.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "allocate.h"
#include "randlib.h"
#include <time.h>
#include "XT_AMatrix.h"
#include "XT_Profile.h"
#include "XT_Structures.h"
#include "XT_IOMisc.h"
#include "XT_NHICD.h"
#include "omp.h"
#include "XT_MPI.h"
#include <mpi.h>
#include "XT_VoxUpdate.h"
#include "XT_ForwardProject.h"
#include "XT_MPIIO.h"
#include "XT_Debug.h"
#include "XT_OffsetError.h"
/*computes the location of (i,j,k) th element in a 1D array*/
int32_t array_loc_1D (int32_t i, int32_t j, int32_t k, int32_t N_j, int32_t N_k)
{
    /* row-major flattening: offset = (i*N_j + j)*N_k + k */
    int32_t offset;
    offset = (i*N_j + j)*N_k + k;
    return (offset);
}
/*finds the maximum in a array 'array_in' with number of elements being 'num'*/
int32_t find_max(int32_t* array_in, int32_t num)
{
    /* linear scan for the largest element; the first element seeds
       the running maximum, so 'num' must be at least 1 */
    int32_t idx, largest;

    largest = array_in[0];
    for (idx = 1; idx < num; idx++)
    {
        if (array_in[idx] > largest)
            largest = array_in[idx];
    }
    return (largest);
}
/*converts the value 'val' to hounsfield units and returns it*/
Real_t convert2Hounsfield (Real_t val)
{
    /* linear map from attenuation to Hounsfield units, calibrated from the
       water and air attenuation reference points */
    Real_t gain, offset;

    gain = (HOUNSFIELD_WATER_MAP-HOUNSFIELD_AIR_MAP)/(WATER_MASS_ATT_COEFF*WATER_DENSITY-AIR_MASS_ATT_COEFF*AIR_DENSITY)/HFIELD_UNIT_CONV_CONST;
    offset = -gain*(AIR_MASS_ATT_COEFF*AIR_DENSITY*HFIELD_UNIT_CONV_CONST);

    return (gain*val + offset);
}
/*Computes the qGGMRF spatial prior cost value at delta = x_i - x_j. i & j being the voxel and its neighbor*/
Real_t CE_QGGMRF_Spatial_Value(Real_t delta, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
    /* qGGMRF spatial potential rho(delta), split into numerator and
       denominator for readability */
    Real_t numerator, denominator;
    numerator = pow(fabs(delta),MRF_Q)/TomoInputsPtr->Sigma_S_Q;
    denominator = ScannedObjectPtr->C_S + pow(fabs(delta),MRF_Q - MRF_P)/TomoInputsPtr->Sigma_S_Q_P;
    return (numerator/denominator);
}
/*Computes the qGGMRF temporal prior cost value at delta = x_i - x_j. i & j being the voxel and its neighbor*/
Real_t CE_QGGMRF_Temporal_Value(Real_t delta, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
    /* qGGMRF temporal potential rho(delta), split into numerator and
       denominator for readability */
    Real_t numerator, denominator;
    numerator = pow(fabs(delta),MRF_Q)/TomoInputsPtr->Sigma_T_Q;
    denominator = ScannedObjectPtr->C_T + pow(fabs(delta),MRF_Q - MRF_P)/TomoInputsPtr->Sigma_T_Q_P;
    return (numerator/denominator);
}
/*Computes the qGGMRF spatial prior derivative at delta = x_i - x_j. i & j being the voxel and its neighbor*/
Real_t CE_QGGMRF_Spatial_Derivative(Real_t delta, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
    /* derivative of the qGGMRF spatial potential at delta = x_i - x_j;
       the magnitude is computed once and the sign of delta applied last
       (negating a product is exact in floating point, so this matches the
       original two-branch form bit for bit) */
    Real_t ratio, power_term, denom, magnitude;

    ratio = pow(fabs(delta),MRF_Q - MRF_P)/(TomoInputsPtr->Sigma_S_Q_P);
    power_term = pow(fabs(delta),MRF_Q - 1);
    denom = ScannedObjectPtr->C_S + ratio;
    magnitude = (power_term/(denom*TomoInputsPtr->Sigma_S_Q))*(MRF_Q - ((MRF_Q-MRF_P)*ratio)/(denom));

    if(delta < 0)
        return (-magnitude);
    return (magnitude);
}
/*Computes the qGGMRF temporal prior derivative at delta = x_i - x_j. i & j being the voxel and its neighbor*/
Real_t CE_QGGMRF_Temporal_Derivative(Real_t delta, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
    /* derivative of the qGGMRF temporal potential at delta = x_i - x_j;
       same sign-factored structure as the spatial derivative */
    Real_t ratio, power_term, denom, magnitude;

    ratio = pow(fabs(delta),MRF_Q - MRF_P)/(TomoInputsPtr->Sigma_T_Q_P);
    power_term = pow(fabs(delta),MRF_Q - 1);
    denom = ScannedObjectPtr->C_T + ratio;
    magnitude = (power_term/(denom*TomoInputsPtr->Sigma_T_Q))*(MRF_Q - ((MRF_Q-MRF_P)*ratio)/(denom));

    if(delta < 0)
        return (-magnitude);
    return (magnitude);
}
/*Computes the qGGMRF spatial prior second derivative at delta = 0*/
Real_t CE_QGGMRF_Spatial_SecondDerivative(ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
    /* curvature of the qGGMRF spatial potential at delta = 0 */
    Real_t curvature;
    curvature = MRF_Q/(TomoInputsPtr->Sigma_S_Q*ScannedObjectPtr->C_S);
    return (curvature);
}
/*Computes the qGGMRF temporal prior second derivative at delta = 0*/
Real_t CE_QGGMRF_Temporal_SecondDerivative(ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
    /* curvature of the qGGMRF temporal potential at delta = 0 */
    Real_t curvature;
    curvature = MRF_Q/(TomoInputsPtr->Sigma_T_Q*ScannedObjectPtr->C_T);
    return (curvature);
}
/*Computes the voxel update and returns it. V is the present value of voxel.
THETA1 and THETA2 are the values used in voxel update. Spatial_Nhood and Time_Nhood gives the
values of voxels in the neighborhood of V. Time_BDFlag and Spatial_BDFlag are masks which determine
whether a neighbor should be included in the neighorhood or not.*/
Real_t CE_FunctionalSubstitution(Real_t V, Real_t THETA1, Real_t THETA2, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_t Spatial_Nhood[NHOOD_Y_MAXDIM][NHOOD_X_MAXDIM][NHOOD_Z_MAXDIM], Real_t Time_Nhood[NHOOD_TIME_MAXDIM-1], bool Spatial_BDFlag[NHOOD_Y_MAXDIM][NHOOD_X_MAXDIM][NHOOD_Z_MAXDIM], bool Time_BDFlag[NHOOD_TIME_MAXDIM-1])
{
    Real_t u,temp1=0,temp2=0,temp_const,RefValue=0,Delta0;
    Real_t QGGMRF_Params;
    int32_t i,j,k;

    RefValue = V;
    /*Need to Loop this for multiple iterations of substitute function*/
    /* Spatial neighbors: accumulate surrogate-weighted neighbor values
       (temp1) and the surrogate weights themselves (temp2).  The center
       voxel at ((N-1)/2, (N-1)/2, (N-1)/2) is excluded, and BD-flagged-out
       neighbors (outside the volume boundary) are skipped. */
    for (i=0; i < NHOOD_Y_MAXDIM; i++)
        for (j=0; j < NHOOD_X_MAXDIM; j++)
            for (k=0; k < NHOOD_Z_MAXDIM; k++)
            {
                if(Spatial_BDFlag[i][j][k] == true && (i != (NHOOD_Y_MAXDIM-1)/2 || j != (NHOOD_X_MAXDIM-1)/2 || k != (NHOOD_Z_MAXDIM-1)/2))
                {
                    Delta0 = (RefValue - Spatial_Nhood[i][j][k]);
                    if(Delta0 != 0)
                        /* surrogate coefficient rho'(Delta)/Delta */
                        QGGMRF_Params = CE_QGGMRF_Spatial_Derivative(Delta0,ScannedObjectPtr,TomoInputsPtr)/(Delta0);
                    else {
                        /* limit of rho'(Delta)/Delta as Delta -> 0 */
                        QGGMRF_Params = CE_QGGMRF_Spatial_SecondDerivative(ScannedObjectPtr,TomoInputsPtr);
                    }
                    temp_const = TomoInputsPtr->Spatial_Filter[i][j][k]*QGGMRF_Params;
                    temp1 += temp_const*Spatial_Nhood[i][j][k];
                    temp2 += temp_const;
                }
            }
    /* Temporal neighbors: same accumulation with the temporal potential */
    for (i=0; i < NHOOD_TIME_MAXDIM - 1; i++)
    {
        if(Time_BDFlag[i] == true)
        {
            Delta0 = (RefValue - Time_Nhood[i]);
            if(Delta0 != 0)
                QGGMRF_Params = CE_QGGMRF_Temporal_Derivative(Delta0,ScannedObjectPtr,TomoInputsPtr)/(Delta0);
            else {
                QGGMRF_Params = CE_QGGMRF_Temporal_SecondDerivative(ScannedObjectPtr,TomoInputsPtr);
            }
            temp_const = TomoInputsPtr->Time_Filter[0]*QGGMRF_Params;
            temp1 += temp_const*Time_Nhood[i];
            temp2 += temp_const;
        }
    }

    /* closed-form minimizer of the quadratic surrogate (prior terms plus the
       data terms THETA1/THETA2), then an over/under-relaxation step by alpha */
    u=(temp1+ (THETA2*V) - THETA1)/(temp2 + THETA2);
    RefValue = RefValue + TomoInputsPtr->alpha*(u-RefValue);

#ifdef POSITIVITY_CONSTRAINT
    /* attenuation cannot be negative: clamp to zero */
    if (RefValue <= 0)
        RefValue = 0;
#endif
    return RefValue;
}
/*computes the value of cost function. 'ErrorSino' is the error sinogram*/
/*Computes the value of the MAP cost function. 'ErrorSino' is the error sinogram (e = y - Ax - d).
Returns the total cost (forward/data term plus prior term) summed across all MPI nodes via
MPI_Reduce and then broadcast so every node returns the same value.
The forward term is a generalized Huber penalty: quadratic for entries flagged in ProjSelect,
linear (plus a constant offset) for the rest.*/
Real_t computeCost(Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino)
{
Real_t cost=0,temp=0, forward=0, prior=0;
Real_t delta;
int32_t i,j,k,p,N_z;
bool j_minus, k_minus, i_plus, j_plus, k_plus, p_plus;
/*Forward (data fidelity) cost: accumulate the penalty over every sinogram entry.*/
#pragma omp parallel for private(j, k, temp) reduction(+:cost)
for (i = 0; i < SinogramPtr->N_p; i++)
for (j = 0; j < SinogramPtr->N_r; j++)
for (k = 0; k < SinogramPtr->N_t; k++)
{
/*Whitened residual: error scaled by the square root of the noise weight.*/
temp = ErrorSino[i][j][k] * sqrt(TomoInputsPtr->Weight[i][j][k]);
/*ProjSelect marks entries inside the threshold region (quadratic penalty);
outside the region the penalty grows linearly in |temp|.*/
if (SinogramPtr->ProjSelect[i][j][k] == true)
temp = temp*temp;
else
temp = 2.0*TomoInputsPtr->ErrorSinoDelta*TomoInputsPtr->ErrorSinoThresh*fabs(temp) + TomoInputsPtr->ErrorSinoThresh*TomoInputsPtr->ErrorSinoThresh*(1.0-2.0*TomoInputsPtr->ErrorSinoDelta);
cost += temp;
}
cost /= 2.0;
/*When computing the cost of the prior term it is important to make sure that you don't include the cost of any pair of neighbors more than once. In this code, a certain sense of causality is used to compute the cost. We also assume that the weighting kernel given by 'Filter' is symmetric. Let i, j and k correspond to the three dimensions. If we go forward to i+1, then all neighbors at j-1, j, j+1, k+1, k, k-1 are to be considered. However, for the same i, if we go forward to j+1, then all k-1, k, and k+1 should be considered. For the same i and j, only the neighbor at k+1 is considered.*/
temp = 0;
/*N_z spans the local slab plus ghost slices shared with neighboring MPI nodes;
the last node has one fewer ghost slice. NOTE(review): inferred from the node_rank
test below -- confirm against the slice layout used elsewhere.*/
N_z = ScannedObjectPtr->N_z + 2;
if (TomoInputsPtr->node_rank == TomoInputsPtr->node_num-1)
N_z = ScannedObjectPtr->N_z + 1;
/*Prior cost: each voxel pairs only with "causal" (forward) neighbors so every pair is counted once.*/
#pragma omp parallel for private(delta, p, j, k, j_minus, k_minus, p_plus, i_plus, j_plus, k_plus) reduction(+:temp)
for (i = 0; i < ScannedObjectPtr->N_time; i++)
for (p = 1; p < ScannedObjectPtr->N_z + 1; p++)
for (j = 0; j < ScannedObjectPtr->N_y; j++)
{
for (k = 0; k < ScannedObjectPtr->N_x; k++)
{
/*Boundary flags: whether each forward/backward neighbor exists.*/
j_minus = (j - 1 >= 0)? true : false;
k_minus = (k - 1 >= 0)? true : false;
p_plus = (p + 1 < N_z)? true : false;
i_plus = (i + 1 < ScannedObjectPtr->N_time)? true : false;
j_plus = (j + 1 < ScannedObjectPtr->N_y)? true : false;
k_plus = (k + 1 < ScannedObjectPtr->N_x)? true : false;
/*Same slice (p), same row (j): only the k+1 neighbor.*/
if(k_plus == true) {
delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j][k + 1]);
temp += TomoInputsPtr->Spatial_Filter[1][1][2] * CE_QGGMRF_Spatial_Value(delta,ScannedObjectPtr,TomoInputsPtr);
}
/*Same slice (p), next row (j+1): neighbors at k-1, k, k+1.*/
if(j_plus == true) {
if(k_minus == true) {
delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k - 1]);
temp += TomoInputsPtr->Spatial_Filter[1][2][0] * CE_QGGMRF_Spatial_Value(delta,ScannedObjectPtr,TomoInputsPtr);
}
delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k]);
temp += TomoInputsPtr->Spatial_Filter[1][2][1] * CE_QGGMRF_Spatial_Value(delta,ScannedObjectPtr,TomoInputsPtr);
if(k_plus == true) {
delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k + 1]);
temp += TomoInputsPtr->Spatial_Filter[1][2][2] * CE_QGGMRF_Spatial_Value(delta,ScannedObjectPtr,TomoInputsPtr);
}
}
/*Next slice (p+1): all nine neighbors in the j-k plane.*/
if (p_plus == true)
{
if(j_minus == true)
{
delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k];
temp += TomoInputsPtr->Spatial_Filter[2][0][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
}
delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p+1][j][k];
temp += TomoInputsPtr->Spatial_Filter[2][1][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
if(j_plus == true)
{
delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p+1][j + 1][k];
temp += TomoInputsPtr->Spatial_Filter[2][2][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
}
if(j_minus == true)
{
if(k_minus == true)
{
delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k - 1];
temp += TomoInputsPtr->Spatial_Filter[2][0][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
}
if(k_plus == true)
{
delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k + 1];
temp += TomoInputsPtr->Spatial_Filter[2][0][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
}
}
if(k_minus == true)
{
delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j][k - 1];
temp += TomoInputsPtr->Spatial_Filter[2][1][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
}
if(j_plus == true)
{
if(k_minus == true)
{
delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j + 1][k - 1];
temp += TomoInputsPtr->Spatial_Filter[2][2][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
}
if(k_plus == true)
{
delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j + 1][k + 1];
temp += TomoInputsPtr->Spatial_Filter[2][2][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
}
}
if(k_plus == true)
{
delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j][k + 1];
temp += TomoInputsPtr->Spatial_Filter[2][1][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr);
}
}
/*Temporal neighbor at the next time frame, same spatial location.*/
if(i_plus == true) {
delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i+1][p][j][k]);
temp += TomoInputsPtr->Time_Filter[0] * CE_QGGMRF_Temporal_Value(delta,ScannedObjectPtr,TomoInputsPtr);
}
}
}
/*Use MPI reduction operation to add the forward and prior costs from all nodes*/
MPI_Reduce(&cost, &forward, 1, MPI_REAL_DATATYPE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&temp, &prior, 1, MPI_REAL_DATATYPE, MPI_SUM, 0, MPI_COMM_WORLD);
/*Rank 0 folds in the variance-estimate normalization term and logs the components.*/
if (TomoInputsPtr->node_rank == 0)
{
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Scaled error sino cost = %f\n",forward);
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Decrease in scaled error sino cost = %f\n",TomoInputsPtr->ErrorSino_Cost-forward);
TomoInputsPtr->ErrorSino_Cost = forward;
/*Add the (N/2)*log(sigma^2) term of the Gaussian likelihood for the full global sinogram.*/
forward += (Real_t)TomoInputsPtr->node_num*(Real_t)SinogramPtr->N_p*(Real_t)SinogramPtr->N_r*(Real_t)SinogramPtr->N_t*log(TomoInputsPtr->var_est)/2;
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Forward cost = %f\n",forward);
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Prior cost = %f\n",prior);
TomoInputsPtr->Forward_Cost = forward;
TomoInputsPtr->Prior_Cost = prior;
cost = forward + prior;
}
/*Broadcast the value of cost to all nodes*/
MPI_Bcast(&cost, 1, MPI_REAL_DATATYPE, 0, MPI_COMM_WORLD);
return cost;
}
/*Upsamples the (N_time x N_z x N_y x N_x) size 'Init' by a factor of 2 along the x-y plane and stores it in 'Object'*/
/*Upsamples the (N_time x N_z x N_y x N_x) array 'Init' by a factor of 2 in the x-y plane and
stores the (N_time x N_z x 2*N_y x 2*N_x) result in 'Object'. Separable bilinear interpolation:
first along x into a per-slice scratch buffer, then along y into 'Object'. Each output sample is
a (3*near + far)/4 blend of the two nearest input samples; the first and last rows/columns are
clamped (replicated/edge-weighted). Parallelized over time frames; each thread owns its own
scratch buffer.*/
void upsample_bilinear_2D (Real_arr_t**** Object, Real_arr_t**** Init, int32_t N_time, int32_t N_z, int32_t N_y, int32_t N_x)
{
int32_t i, j, k, m;
Real_arr_t **buffer;
#pragma omp parallel for private(buffer, m, j, k)
for (i=0; i < N_time; i++)
for (m=0; m < N_z; m++)
{
/*Scratch: x-upsampled rows (N_y x 2*N_x), allocated per thread/slice.*/
buffer = (Real_arr_t**)multialloc(sizeof(Real_arr_t), 2, N_y, 2*N_x);
for (j=0; j < N_y; j++){
/*Edge samples along x are clamped to the boundary values.*/
buffer[j][0] = Init[i][m][j][0];
buffer[j][1] = (3.0*Init[i][m][j][0] + Init[i][m][j][1])/4.0;
buffer[j][2*N_x - 1] = Init[i][m][j][N_x - 1];
buffer[j][2*N_x - 2] = (Init[i][m][j][N_x - 2] + 3.0*Init[i][m][j][N_x - 1])/4.0;
/*Interior: outputs 2k and 2k+1 sit at -1/4 and +1/4 of input sample k.*/
for (k=1; k < N_x - 1; k++){
buffer[j][2*k] = (Init[i][m][j][k-1] + 3.0*Init[i][m][j][k])/4.0;
buffer[j][2*k + 1] = (3.0*Init[i][m][j][k] + Init[i][m][j][k+1])/4.0;
}
}
/*Second pass: upsample along y from the scratch buffer into the output, edges clamped.*/
for (k=0; k < 2*N_x; k++){
Object[i][m][0][k] = buffer[0][k];
Object[i][m][1][k] = (3.0*buffer[0][k] + buffer[1][k])/4.0;
Object[i][m][2*N_y-1][k] = buffer[N_y-1][k];
Object[i][m][2*N_y-2][k] = (buffer[N_y-2][k] + 3.0*buffer[N_y-1][k])/4.0;
}
for (j=1; j<N_y-1; j++){
for (k=0; k<2*N_x; k++){
Object[i][m][2*j][k] = (buffer[j-1][k] + 3.0*buffer[j][k])/4.0;
Object[i][m][2*j + 1][k] = (3*buffer[j][k] + buffer[j+1][k])/4.0;
}
}
multifree(buffer,2);
}
}
/*Upsamples the (N_z x N_y x N_x) size 'Init' by a factor of 2 along the x-y plane and stores it in 'Object'*/
/*Upsamples the (N_z x N_y x N_x) array 'Init' by a factor of 2 in the x-y plane and stores the
result in 'Object'. Same separable (3*near + far)/4 bilinear stencil as upsample_bilinear_2D.
Note: output slices are written at Object[slice+1], leaving Object[0] untouched -- consistent
with the extra ghost z-slices the object keeps for MPI halo exchange (see initObject).*/
void upsample_object_bilinear_2D (Real_arr_t*** Object, Real_arr_t*** Init, int32_t N_z, int32_t N_y, int32_t N_x)
{
int32_t j, k, slice;
Real_arr_t **buffer;
/*Scratch buffer holding x-upsampled rows (N_y x 2*N_x), reused across slices.*/
buffer = (Real_arr_t**)multialloc(sizeof(Real_arr_t), 2, N_y, 2*N_x);
for (slice=0; slice < N_z; slice++){
/*Pass 1: upsample along x; boundary columns are clamped.*/
for (j=0; j < N_y; j++){
buffer[j][0] = Init[slice][j][0];
buffer[j][1] = (3.0*Init[slice][j][0] + Init[slice][j][1])/4.0;
buffer[j][2*N_x - 1] = Init[slice][j][N_x - 1];
buffer[j][2*N_x - 2] = (Init[slice][j][N_x - 2] + 3.0*Init[slice][j][N_x - 1])/4.0;
for (k=1; k < N_x - 1; k++){
buffer[j][2*k] = (Init[slice][j][k-1] + 3.0*Init[slice][j][k])/4.0;
buffer[j][2*k + 1] = (3.0*Init[slice][j][k] + Init[slice][j][k+1])/4.0;
}
}
/*Pass 2: upsample along y into the output; slice+1 skips the leading ghost slice.*/
for (k=0; k < 2*N_x; k++){
Object[slice+1][0][k] = buffer[0][k];
Object[slice+1][1][k] = (3.0*buffer[0][k] + buffer[1][k])/4.0;
Object[slice+1][2*N_y-1][k] = buffer[N_y-1][k];
Object[slice+1][2*N_y-2][k] = (buffer[N_y-2][k] + 3.0*buffer[N_y-1][k])/4.0;
}
for (j=1; j<N_y-1; j++){
for (k=0; k<2*N_x; k++){
Object[slice+1][2*j][k] = (buffer[j-1][k] + 3.0*buffer[j][k])/4.0;
Object[slice+1][2*j + 1][k] = (3*buffer[j][k] + buffer[j+1][k])/4.0;
}
}
}
multifree(buffer,2);
}
/*Upsamples the (N_time x N_z x N_y x N_x) array 'Init' by a factor of 2 in all of x, y and z and
stores the (N_time x 2*N_z x 2*N_y x 2*N_x) result in 'Object'. Separable (3*near + far)/4
bilinear interpolation applied along x, then y (into scratch buffers), then z (into the output);
boundary samples are clamped. Parallelized over time frames; each thread allocates its own
scratch buffers.*/
void upsample_bilinear_3D (Real_arr_t**** Object, Real_arr_t**** Init, int32_t N_time, int32_t N_z, int32_t N_y, int32_t N_x)
{
int32_t i, j, k, slice;
Real_t ***buffer2D, ***buffer3D;
#pragma omp parallel for private(buffer2D, buffer3D, slice, j, k)
for (i=0; i < N_time; i++)
{
/*buffer2D holds x-upsampled data (N_z x N_y x 2*N_x);
buffer3D holds x+y-upsampled data (N_z x 2*N_y x 2*N_x).*/
buffer2D = (Real_t***)multialloc(sizeof(Real_t), 3, N_z, N_y, 2*N_x);
buffer3D = (Real_t***)multialloc(sizeof(Real_t), 3, N_z, 2*N_y, 2*N_x);
for (slice=0; slice < N_z; slice++){
/*Pass 1: upsample along x; boundary columns clamped.*/
for (j=0; j < N_y; j++){
buffer2D[slice][j][0] = Init[i][slice][j][0];
buffer2D[slice][j][1] = (3.0*Init[i][slice][j][0] + Init[i][slice][j][1])/4.0;
buffer2D[slice][j][2*N_x - 1] = Init[i][slice][j][N_x - 1];
buffer2D[slice][j][2*N_x - 2] = (Init[i][slice][j][N_x - 2] + 3.0*Init[i][slice][j][N_x - 1])/4.0;
for (k=1; k < N_x - 1; k++){
buffer2D[slice][j][2*k] = (Init[i][slice][j][k-1] + 3.0*Init[i][slice][j][k])/4.0;
buffer2D[slice][j][2*k + 1] = (3.0*Init[i][slice][j][k] + Init[i][slice][j][k+1])/4.0;
}
}
/*Pass 2: upsample along y; boundary rows clamped.*/
for (k=0; k < 2*N_x; k++){
buffer3D[slice][0][k] = buffer2D[slice][0][k];
buffer3D[slice][1][k] = (3.0*buffer2D[slice][0][k] + buffer2D[slice][1][k])/4.0;
buffer3D[slice][2*N_y-1][k] = buffer2D[slice][N_y-1][k];
buffer3D[slice][2*N_y-2][k] = (buffer2D[slice][N_y-2][k] + 3.0*buffer2D[slice][N_y-1][k])/4.0;
}
for (j=1; j<N_y-1; j++)
for (k=0; k<2*N_x; k++){
buffer3D[slice][2*j][k] = (buffer2D[slice][j-1][k] + 3.0*buffer2D[slice][j][k])/4.0;
buffer3D[slice][2*j + 1][k] = (3*buffer2D[slice][j][k] + buffer2D[slice][j+1][k])/4.0;
}
}
/*Pass 3: upsample along z into the output; first and last output slices clamped.*/
for (j=0; j<2*N_y; j++)
for (k=0; k<2*N_x; k++){
Object[i][0][j][k] = buffer3D[0][j][k];
Object[i][1][j][k] = (3.0*buffer3D[0][j][k] + buffer3D[1][j][k])/4.0;
Object[i][2*N_z-1][j][k] = buffer3D[N_z-1][j][k];
Object[i][2*N_z-2][j][k] = (3.0*buffer3D[N_z-1][j][k] + buffer3D[N_z-2][j][k])/4.0;
}
for (slice=1; slice < N_z-1; slice++)
for (j=0; j<2*N_y; j++)
for (k=0; k<2*N_x; k++){
Object[i][2*slice][j][k] = (buffer3D[slice-1][j][k] + 3.0*buffer3D[slice][j][k])/4.0;
Object[i][2*slice+1][j][k] = (3.0*buffer3D[slice][j][k] + buffer3D[slice+1][j][k])/4.0;
}
multifree(buffer2D,3);
multifree(buffer3D,3);
}
}
/*'InitObject' intializes the Object to be reconstructed to either 0 or an interpolated version of the previous reconstruction. It is used in multi resolution reconstruction in which after every coarse resolution reconstruction the object should be intialized with an interpolated version of the reconstruction following which the object will be reconstructed at a finer resolution.*/
/*Upsamples the (N_time x N_z x N_y x N_x) size 'Init' by a factor of 2 along the in 3D x-y-z coordinates and stores it in 'Object'*/
/*Upsamples the (N_z x N_y x N_x) array 'Init' by a factor of 2 in x, y and z and stores the
result in 'Object'. Same separable (3*near + far)/4 stencil as upsample_bilinear_3D.
Note: output z-slices are written at indices 1..2*N_z (Object[0] is left untouched), matching
the leading ghost slice the object keeps for MPI halo exchange (see initObject).*/
void upsample_object_bilinear_3D (Real_arr_t*** Object, Real_arr_t*** Init, int32_t N_z, int32_t N_y, int32_t N_x)
{
int32_t j, k, slice;
Real_t ***buffer2D, ***buffer3D;
/*buffer2D: x-upsampled (N_z x N_y x 2*N_x); buffer3D: x+y-upsampled (N_z x 2*N_y x 2*N_x).*/
buffer2D = (Real_t***)multialloc(sizeof(Real_t), 3, N_z, N_y, 2*N_x);
buffer3D = (Real_t***)multialloc(sizeof(Real_t), 3, N_z, 2*N_y, 2*N_x);
for (slice=0; slice < N_z; slice++){
/*Pass 1: upsample along x; boundary columns clamped.*/
for (j=0; j < N_y; j++){
buffer2D[slice][j][0] = Init[slice][j][0];
buffer2D[slice][j][1] = (3.0*Init[slice][j][0] + Init[slice][j][1])/4.0;
buffer2D[slice][j][2*N_x - 1] = Init[slice][j][N_x - 1];
buffer2D[slice][j][2*N_x - 2] = (Init[slice][j][N_x - 2] + 3.0*Init[slice][j][N_x - 1])/4.0;
for (k=1; k < N_x - 1; k++){
buffer2D[slice][j][2*k] = (Init[slice][j][k-1] + 3.0*Init[slice][j][k])/4.0;
buffer2D[slice][j][2*k + 1] = (3.0*Init[slice][j][k] + Init[slice][j][k+1])/4.0;
}
}
/*Pass 2: upsample along y; boundary rows clamped.*/
for (k=0; k < 2*N_x; k++){
buffer3D[slice][0][k] = buffer2D[slice][0][k];
buffer3D[slice][1][k] = (3.0*buffer2D[slice][0][k] + buffer2D[slice][1][k])/4.0;
buffer3D[slice][2*N_y-1][k] = buffer2D[slice][N_y-1][k];
buffer3D[slice][2*N_y-2][k] = (buffer2D[slice][N_y-2][k] + 3.0*buffer2D[slice][N_y-1][k])/4.0;
}
for (j=1; j<N_y-1; j++)
for (k=0; k<2*N_x; k++){
buffer3D[slice][2*j][k] = (buffer2D[slice][j-1][k] + 3.0*buffer2D[slice][j][k])/4.0;
buffer3D[slice][2*j + 1][k] = (3*buffer2D[slice][j][k] + buffer2D[slice][j+1][k])/4.0;
}
}
/*Pass 3: upsample along z; note the +1 slice offset for the MPI ghost slice.*/
for (j=0; j<2*N_y; j++)
for (k=0; k<2*N_x; k++){
Object[1][j][k] = buffer3D[0][j][k];
Object[2][j][k] = (3.0*buffer3D[0][j][k] + buffer3D[1][j][k])/4.0;
Object[2*N_z][j][k] = buffer3D[N_z-1][j][k];
Object[2*N_z-1][j][k] = (3.0*buffer3D[N_z-1][j][k] + buffer3D[N_z-2][j][k])/4.0;
}
for (slice=1; slice < N_z-1; slice++)
for (j=0; j<2*N_y; j++)
for (k=0; k<2*N_x; k++){
Object[2*slice+1][j][k] = (buffer3D[slice-1][j][k] + 3.0*buffer3D[slice][j][k])/4.0;
Object[2*slice+2][j][k] = (3.0*buffer3D[slice][j][k] + buffer3D[slice+1][j][k])/4.0;
}
multifree(buffer2D,3);
multifree(buffer3D,3);
}
/*Randomly selects the voxel lines which need to be updated along the x-y plane for each z-block and time slice*/
/*Randomly selects, for every time slice and z-block, the order in which the x-y voxel lines will
be updated. Uses a Fisher-Yates-style draw without replacement over the N_y*N_x line indices:
each draw swaps the chosen entry with the current tail of 'Counter' and shrinks the range.
Lines masked out by 'Mask' are skipped. Results go into TomoInputsPtr->x_rand_select /
y_rand_select, with the per-(time, block) count in UpdateSelectNum.*/
void randomly_select_x_y (ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, uint8_t*** Mask)
{
int32_t i, j, num,n, Index, col, row, *Counter, ArraySize, block;
ArraySize = ScannedObjectPtr->N_y*ScannedObjectPtr->N_x;
Counter = (int32_t*)get_spc(ArraySize, sizeof(int32_t));
for (i=0; i<ScannedObjectPtr->N_time; i++)
for (block=0; block<TomoInputsPtr->num_z_blocks; block++)
{
/*Reset the pool of not-yet-drawn line indices for this (time, block) pair.*/
ArraySize = ScannedObjectPtr->N_y*ScannedObjectPtr->N_x;
for (Index = 0; Index < ArraySize; Index++)
Counter[Index] = Index;
TomoInputsPtr->UpdateSelectNum[i][block] = 0;
for (j=0; j<ScannedObjectPtr->N_x*ScannedObjectPtr->N_y; j++){
/*Draw a random position in the remaining pool; clamp the (rare) Index == ArraySize case.*/
Index = floor(random2() * ArraySize);
Index = (Index == ArraySize)?ArraySize-1:Index;
col = Counter[Index] % ScannedObjectPtr->N_x;
row = Counter[Index] / ScannedObjectPtr->N_x;
/*NOTE(review): the body breaks on the first iteration and Mask does not depend on n,
so this loop over the block's z-slices is effectively a single Mask test -- confirm
whether a per-slice mask lookup was intended.*/
for (n = block*(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks); n < (block+1)*(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks); n++)
if (Mask[i][row][col] == 1)
{
num = TomoInputsPtr->UpdateSelectNum[i][block];
TomoInputsPtr->x_rand_select[i][block][num] = col;
TomoInputsPtr->y_rand_select[i][block][num] = row;
(TomoInputsPtr->UpdateSelectNum[i][block])++;
break;
}
/*Remove the drawn entry by overwriting it with the tail and shrinking the pool.*/
Counter[Index] = Counter[ArraySize - 1];
ArraySize--;
}
}
free(Counter);
}
/*'InitObject' initializes the Object to be reconstructed to either 0 or an interpolated version of the previous reconstruction. It is used in multi-resolution reconstruction in which, after every coarse resolution reconstruction, the object should be initialized with an interpolated version of the reconstruction, following which the object will be reconstructed at a finer resolution.
--initICD--
If 1, initializes the object to 0
If 2, the code uses bilinear interpolation to initialize the object if the previous reconstruction was at a lower resolution
The function also initializes the magnitude update map 'MagUpdateMap' from the previous coarser resolution
reconstruction. */
/*'initObject' initializes the object to be reconstructed to either a constant or an interpolated
version of a previous reconstruction. It is used in multi-resolution reconstruction: after every
coarse-resolution pass the object is re-initialized with an interpolated version of that result
and then reconstructed at a finer resolution.
--initICD--
If 0, the object is left at OBJECT_INIT_VAL.
If 1, the object is read back from file at the same resolution.
If 2, a half-resolution (x-y) reconstruction is upsampled 2x using bilinear interpolation.
If 3, a half-resolution (x-y-z) reconstruction is upsampled 2x using bilinear interpolation.
The function also initializes the magnitude update map 'MagUpdateMap' from the previous
coarser-resolution reconstruction.
Returns 0 on success, -1 on any read/write failure or an unrecognized initICD value.*/
int32_t initObject (Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_arr_t**** MagUpdateMap)
{
	char object_file[100];
	int dimTiff[4];
	int32_t i, j, k, l, size, flag = 0;
	Real_arr_t ***Init, ****UpMapInit;

	/*Default initialization. The j+1 offset skips the leading ghost z-slice kept for MPI halo exchange.*/
	for (i = 0; i < ScannedObjectPtr->N_time; i++)
	for (j = 0; j < ScannedObjectPtr->N_z; j++)
	for (k = 0; k < ScannedObjectPtr->N_y; k++)
	for (l = 0; l < ScannedObjectPtr->N_x; l++)
		ScannedObjectPtr->Object[i][j+1][k][l] = OBJECT_INIT_VAL;

	if (TomoInputsPtr->initICD > 3 || TomoInputsPtr->initICD < 0){
		/*sentinel() logs the message and jumps to the 'error' label below.*/
		sentinel(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "ERROR: initICD value not recognized.\n");
	}
	else if (TomoInputsPtr->initICD == 1)
	{
		/*Same-resolution restart: read each time frame of the object from its shared binary file.*/
		size = ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x;
		for (i = 0; i < ScannedObjectPtr->N_time; i++)
		{
			snprintf(object_file, sizeof(object_file), "%s_time_%d", OBJECT_FILENAME, i);
			if (read_SharedBinFile_At (object_file, &(ScannedObjectPtr->Object[i][1][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1;
		}
		if (TomoInputsPtr->initMagUpMap == 1)
		{
			size = ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x;
			if (read_SharedBinFile_At (MAG_UPDATE_FILENAME, &(MagUpdateMap[0][0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1;
		}
	}
	else if (TomoInputsPtr->initICD == 2 || TomoInputsPtr->initICD == 3)
	{
		if (TomoInputsPtr->initICD == 3)
		{
			/*Previous reconstruction was at half resolution in all three dimensions.*/
			Init = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, ScannedObjectPtr->N_z/2, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
			check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Interpolating object using 3D bilinear interpolation.\n");
			for (i = 0; i < ScannedObjectPtr->N_time; i++)
			{
				snprintf(object_file, sizeof(object_file), "%s_time_%d", OBJECT_FILENAME, i);
				size = ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x/8;
				if (read_SharedBinFile_At (object_file, &(Init[0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1;
				upsample_object_bilinear_3D (ScannedObjectPtr->Object[i], Init, ScannedObjectPtr->N_z/2, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
			}
			multifree(Init,3);
			check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Done with interpolating object using 3D bilinear interpolation.\n");
		}
		else
		{
			/*Previous reconstruction was at half resolution in x-y only.*/
			Init = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
			check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Interpolating object using 2D bilinear interpolation.\n");
			for (i = 0; i < ScannedObjectPtr->N_time; i++)
			{
				snprintf(object_file, sizeof(object_file), "%s_time_%d", OBJECT_FILENAME, i);
				size = ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x/4;
				if (read_SharedBinFile_At (object_file, &(Init[0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1;
				upsample_object_bilinear_2D (ScannedObjectPtr->Object[i], Init, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
			}
			multifree(Init,3);
			check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Done with interpolating object using 2D bilinear interpolation.\n");
		}
		if (TomoInputsPtr->initMagUpMap == 1)
		{
			if (TomoInputsPtr->prevnum_z_blocks == TomoInputsPtr->num_z_blocks)
			{
				/*Same number of z-blocks as before: upsample the map in x-y only.*/
				UpMapInit = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
				size = ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x/4;
				if (read_SharedBinFile_At (MAG_UPDATE_FILENAME, &(UpMapInit[0][0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1;
				check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Interpolating magnitude update map using 2D bilinear interpolation.\n");
				upsample_bilinear_2D (MagUpdateMap, UpMapInit, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
				multifree(UpMapInit,4);
			}
			else if (TomoInputsPtr->prevnum_z_blocks == TomoInputsPtr->num_z_blocks/2)
			{
				/*Number of z-blocks doubled: upsample the map in x-y-z.*/
				UpMapInit = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks/2, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
				size = ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x/8;
				if (read_SharedBinFile_At (MAG_UPDATE_FILENAME, &(UpMapInit[0][0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1;
				check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Interpolating magnitude update map using 3D bilinear interpolation.\n");
				upsample_bilinear_3D (MagUpdateMap, UpMapInit, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks/2, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2);
				multifree(UpMapInit,4);
			}
			else
			{
				check_warn(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Number of axial blocks is incompatible with previous stage of multi-resolution.\n");
				check_warn(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Initializing the multi-resolution map to zeros.\n");
			}
		}
	}

	/*Optionally dump the magnitude update map and the initialized object as TIFFs for inspection.*/
	dimTiff[0] = ScannedObjectPtr->N_time; dimTiff[1] = TomoInputsPtr->num_z_blocks; dimTiff[2] = ScannedObjectPtr->N_y; dimTiff[3] = ScannedObjectPtr->N_x;
	snprintf(object_file, sizeof(object_file), "%s_n%d", MAG_UPDATE_FILENAME, TomoInputsPtr->node_rank);
	if (TomoInputsPtr->Write2Tiff == 1)
		if (WriteMultiDimArray2Tiff (object_file, dimTiff, 0, 1, 2, 3, &(MagUpdateMap[0][0][0][0]), 0, TomoInputsPtr->debug_file_ptr))
			flag = -1;
	for (i = 0; i < ScannedObjectPtr->N_time; i++)
	{
		/*BUG FIX: the original built this name with sprintf(object_file, "%s_time_%d", object_file, i),
		passing the same buffer as source and destination -- undefined behavior per the C standard.
		Build the full name in a single bounded call instead.*/
		snprintf (object_file, sizeof(object_file), "%s_n%d_time_%d", INIT_OBJECT_FILENAME, TomoInputsPtr->node_rank, i);
		dimTiff[0] = 1; dimTiff[1] = ScannedObjectPtr->N_z; dimTiff[2] = ScannedObjectPtr->N_y; dimTiff[3] = ScannedObjectPtr->N_x;
		if (TomoInputsPtr->Write2Tiff == 1)
			if (WriteMultiDimArray2Tiff (object_file, dimTiff, 0, 1, 2, 3, &(ScannedObjectPtr->Object[i][1][0][0]), 0, TomoInputsPtr->debug_file_ptr))
				flag = -1;
	}
	return (flag);
error:
	return (-1);
}
/*'initErrorSinogram' is used to initialize the error sinogram before the start of ICD. It computes e = y - Ax - d. Ax is computed by forward projecting the object x.*/
/*'initErrorSinogam' initializes the error sinogram before the start of ICD. It computes
e = y - Ax - d, where Ax is obtained by forward projecting the object x and d is the
per-detector projection offset. It also initializes 'ProjSelect', marking entries whose
whitened residual |e*sqrt(W)| is below ErrorSinoThresh (quadratic penalty region).
Frees SinogramPtr->Projection once the error sinogram has been formed.
Returns the TIFF-write status flag (0 on success).*/
int32_t initErrorSinogam (Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_arr_t** DetectorResponse, Real_arr_t*** ErrorSino/*, AMatrixCol* VoxelLineResponse*/)
{
	Real_t pixel, avg=0;
	int32_t dimTiff[4], i, j, k, p, sino_idx, slice, flag = 0;
	AMatrixCol* AMatrixPtr = (AMatrixCol*)get_spc(ScannedObjectPtr->N_time, sizeof(AMatrixCol));
	/*Upper bound on the number of detector bins a single voxel can project onto.*/
	uint8_t AvgNumXElements = (uint8_t)ceil(3*ScannedObjectPtr->delta_xy/SinogramPtr->delta_r);
	char error_file[100];

	/*BUG FIX: the original did sprintf(error_file, "%s_n%d", error_file, ...), passing the same
	buffer as source and destination -- undefined behavior per the C standard. Build the name
	in a single bounded call instead.*/
	snprintf(error_file, sizeof(error_file), "error_sinogram_n%d", TomoInputsPtr->node_rank);

	/*Per-time-frame scratch for one A-matrix column (reused across voxels by each thread's frame).*/
	for (i = 0; i < ScannedObjectPtr->N_time; i++)
	{
		AMatrixPtr[i].values = (Real_t*)get_spc(AvgNumXElements, sizeof(Real_t));
		AMatrixPtr[i].index = (int32_t*)get_spc(AvgNumXElements, sizeof(int32_t));
	}

	/*Accumulate Ax into ErrorSino by forward projecting every voxel line of every time frame.*/
	memset(&(ErrorSino[0][0][0]), 0, SinogramPtr->N_p*SinogramPtr->N_t*SinogramPtr->N_r*sizeof(Real_arr_t));
	#pragma omp parallel for private(j, k, p, sino_idx, slice, pixel)
	for (i=0; i<ScannedObjectPtr->N_time; i++)
	{
		for (j=0; j<ScannedObjectPtr->N_y; j++)
		{
			for (k=0; k<ScannedObjectPtr->N_x; k++){
				for (p=0; p<ScannedObjectPtr->ProjNum[i]; p++){
					sino_idx = ScannedObjectPtr->ProjIdxPtr[i][p];
					calcAMatrixColumnforAngle(SinogramPtr, ScannedObjectPtr, DetectorResponse, &(AMatrixPtr[i]), j, k, sino_idx);
					for (slice=0; slice<ScannedObjectPtr->N_z; slice++){
						pixel = ScannedObjectPtr->Object[i][slice+1][j][k]; /*slice+1 to account for the extra z slices required for MPI*/
						forward_project_voxel (SinogramPtr, pixel, ErrorSino, &(AMatrixPtr[i])/*, &(VoxelLineResponse[slice])*/, sino_idx, slice);
					}
				}
			}
		}
	}

	/*Form e = y - Ax - d and classify each entry into the quadratic or linear penalty region.*/
	#pragma omp parallel for private(j, k) reduction(+:avg)
	for(i=0; i < SinogramPtr->N_p; i++)
	for (j = 0; j < SinogramPtr->N_r; j++)
	for (k = 0; k < SinogramPtr->N_t; k++)
	{
		ErrorSino[i][j][k] = SinogramPtr->Projection[i][j][k] - ErrorSino[i][j][k] - SinogramPtr->ProjOffset[j][k];
		if (fabs(ErrorSino[i][j][k]*sqrt(TomoInputsPtr->Weight[i][j][k])) < TomoInputsPtr->ErrorSinoThresh)
			SinogramPtr->ProjSelect[i][j][k] = true;
		else
			SinogramPtr->ProjSelect[i][j][k] = false;
		/* if (ErrorSino[i][j][k]*sqrt(TomoInputsPtr->Weight[i][j][k]) < -30)
			TomoInputsPtr->Weight[i][j][k] = 0;*/
		avg+=ErrorSino[i][j][k];
	}
	/*Cast the dimensions to Real_t before multiplying (consistent with the normalizations used
	elsewhere) so the product cannot overflow int32 for large sinograms.*/
	avg = avg/((Real_t)SinogramPtr->N_r*(Real_t)SinogramPtr->N_t*(Real_t)SinogramPtr->N_p);
	check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Average of error sinogram in node %d is %f\n", TomoInputsPtr->node_rank, avg);

	/*Optionally dump the error sinogram as a TIFF for inspection.*/
	dimTiff[0] = 1; dimTiff[1] = SinogramPtr->N_p; dimTiff[2] = SinogramPtr->N_r; dimTiff[3] = SinogramPtr->N_t;
	if (TomoInputsPtr->Write2Tiff == 1)
		flag = WriteMultiDimArray2Tiff (error_file, dimTiff, 0, 3, 1, 2, &(ErrorSino[0][0][0]), 0, TomoInputsPtr->debug_file_ptr);

	for (i = 0; i < ScannedObjectPtr->N_time; i++)
	{
		free(AMatrixPtr[i].values);
		free(AMatrixPtr[i].index);
	}
	free (AMatrixPtr);
	/*The raw projections are no longer needed once the error sinogram exists.*/
	multifree(SinogramPtr->Projection,3);
	return (flag);
}
/*Updates the variance parameter \sigma*/
/*Updates the noise variance parameter sigma^2 (TomoInputsPtr->var_est).
Step 1 removes the previous variance scaling from the weights while accumulating the new
variance estimate; step 2 pools the estimate across MPI nodes and normalizes by the global
number of sinogram entries; step 3 rescales the weights by the new variance and refreshes
the quadratic/linear region selector 'ProjSelect'.*/
void update_variance_parameter (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino)
{
	int32_t p, r, t;
	Real_t local_sum = 0, contrib = 0;

	/*Step 1: un-scale the weights and accumulate the per-entry variance contributions.*/
	#pragma omp parallel for private(r, t, contrib) reduction(+:local_sum)
	for (p = 0; p < SinogramPtr->N_p; p++)
		for (r = 0; r < SinogramPtr->N_r; r++)
			for (t = 0; t < SinogramPtr->N_t; t++)
			{
				TomoInputsPtr->Weight[p][r][t] = TomoInputsPtr->Weight[p][r][t]*TomoInputsPtr->var_est;
				if (SinogramPtr->ProjSelect[p][r][t] == true)
					contrib = ErrorSino[p][r][t]*ErrorSino[p][r][t]*TomoInputsPtr->Weight[p][r][t];
				else
					contrib = fabs(ErrorSino[p][r][t])*TomoInputsPtr->ErrorSinoDelta*TomoInputsPtr->ErrorSinoThresh*sqrt(TomoInputsPtr->Weight[p][r][t]*TomoInputsPtr->var_est);
				local_sum += contrib;
			}

	/*Step 2: pool the accumulator over all MPI nodes and normalize by the global entry count.*/
	contrib = 0;
	MPI_Allreduce(&local_sum, &contrib, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
	TomoInputsPtr->var_est = contrib/((Real_t)TomoInputsPtr->node_num*(Real_t)SinogramPtr->N_p*(Real_t)SinogramPtr->N_r*(Real_t)SinogramPtr->N_t);

	/*Step 3: fold the new variance back into the weights and reclassify every entry.*/
	#pragma omp parallel for private(r, t)
	for (p = 0; p < SinogramPtr->N_p; p++)
		for (r = 0; r < SinogramPtr->N_r; r++)
			for (t = 0; t < SinogramPtr->N_t; t++)
			{
				TomoInputsPtr->Weight[p][r][t] /= TomoInputsPtr->var_est;
				if (fabs(ErrorSino[p][r][t]*sqrt(TomoInputsPtr->Weight[p][r][t])) < TomoInputsPtr->ErrorSinoThresh)
					SinogramPtr->ProjSelect[p][r][t] = true;
				else
					SinogramPtr->ProjSelect[p][r][t] = false;
			}
}
/*Updates the projection offset error parameter d_i under a rectangular-patch constraint.
Builds a per-detector quadratic surrogate (diagonal 'Lambda' and linear term 'b') from the
generalized-Huber weighting, solves the constrained quadratic program via
constrained_quad_opt() into 'x', then applies the offset change to the error sinogram and
refreshes the quadratic/linear region selector 'ProjSelect'.*/
void update_d_offset_rect_patch_constraint (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino)
{
Real_t sign, **b, **Lambda, temp;
Real_arr_t **x;
int32_t i, j, k;
b = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
Lambda = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
x = (Real_arr_t**)multialloc(sizeof(Real_arr_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
memset(&(b[0][0]), 0, SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_t))
;
memset(&(Lambda[0][0]), 0, SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_t));
memset(&(x[0][0]), 0, SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_arr_t));
/*Accumulate the surrogate coefficients over the projection (view) dimension k.*/
#pragma omp parallel for collapse(2) private(i, j, k, temp, sign)
for (i = 0; i < SinogramPtr->N_r; i++)
{
for (j = 0; j < SinogramPtr->N_t; j++)
{
b[i][j] = 0;
Lambda[i][j] = 0;
for (k = 0; k < SinogramPtr->N_p; k++)
{
temp = TomoInputsPtr->ErrorSinoThresh*TomoInputsPtr->ErrorSinoDelta*sqrt(TomoInputsPtr->Weight[k][i][j]);
if (SinogramPtr->ProjSelect[k][i][j] == true)
{
/*Quadratic region: use the full weight.*/
Lambda[i][j] += TomoInputsPtr->Weight[k][i][j];
b[i][j] += (ErrorSino[k][i][j] + SinogramPtr->ProjOffset[i][j])*TomoInputsPtr->Weight[k][i][j];
}
else
{
/*Linear region: use the reweighted (surrogate) coefficient temp/|e|.
NOTE(review): 'sign' is computed here but never used in this branch, unlike
update_d_offset_zero_mean_constraint where it enters the numerator -- confirm
whether this formula is intentional or a dropped term.*/
sign = (ErrorSino[k][i][j] > 0) - (ErrorSino[k][i][j] < 0);
Lambda[i][j] += temp/fabs(ErrorSino[k][i][j]);
b[i][j] += (ErrorSino[k][i][j] + SinogramPtr->ProjOffset[i][j])*temp/fabs(ErrorSino[k][i][j]);
}
}
}
}
/*Solve the constrained quadratic problem for the new offsets 'x'.*/
constrained_quad_opt (Lambda, b, SinogramPtr->off_constraint, x, SinogramPtr->N_r, SinogramPtr->N_t, SinogramPtr->off_constraint_num, TomoInputsPtr);
/*Apply the offset change (old - new) to the error sinogram and reclassify each entry.*/
#pragma omp parallel for collapse(3) private(i, j, k)
for (k = 0; k < SinogramPtr->N_p; k++)
{
for (i = 0; i < SinogramPtr->N_r; i++)
{
for (j = 0; j < SinogramPtr->N_t; j++)
{
ErrorSino[k][i][j] += SinogramPtr->ProjOffset[i][j] - x[i][j];
if (fabs(ErrorSino[k][i][j]*sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh)
SinogramPtr->ProjSelect[k][i][j] = true;
else
SinogramPtr->ProjSelect[k][i][j] = false;
}
}
}
/*Commit the new offsets.*/
memcpy(&(SinogramPtr->ProjOffset[0][0]),&(x[0][0]),SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_arr_t));
multifree(b,2);
multifree(Lambda,2);
multifree(x,2);
}
/*Updates the projection offset error parameter d_i*/
/*Updates the projection offset error parameter d_i under a zero-mean constraint.
Computes the unconstrained per-detector update (numerator/denominator), then subtracts the
Lagrange-multiplier value 'gamma' so the updated offsets keep their mean unchanged, applies
the update to the error sinogram and refreshes 'ProjSelect'.*/
void update_d_offset_zero_mean_constraint (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino)
{
Real_t sign, **numerator, num_sum = 0, temp, **denominator, den_sum = 0, gamma = 0;
int32_t i, j, k;
numerator = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
denominator = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
/*Accumulate per-detector surrogate terms over the projection dimension k, plus the
global sums needed for the Lagrange multiplier.*/
#pragma omp parallel for private(j, k, temp, sign) reduction(+:num_sum, den_sum)
for (i = 0; i < SinogramPtr->N_r; i++)
for (j = 0; j < SinogramPtr->N_t; j++)
{
numerator[i][j] = 0;
denominator[i][j] = 0;
for (k = 0; k < SinogramPtr->N_p; k++)
{
temp = TomoInputsPtr->ErrorSinoThresh*TomoInputsPtr->ErrorSinoDelta*sqrt(TomoInputsPtr->Weight[k][i][j]);
if (SinogramPtr->ProjSelect[k][i][j] == true)
{
/*Quadratic region: standard weighted least-squares terms.*/
numerator[i][j] += ErrorSino[k][i][j]*TomoInputsPtr->Weight[k][i][j];
denominator[i][j] += TomoInputsPtr->Weight[k][i][j];
}
else
{
/*Linear region: gradient has constant magnitude 'temp' with the sign of the error.*/
sign = (ErrorSino[k][i][j] > 0) - (ErrorSino[k][i][j] < 0);
numerator[i][j] += temp*sign;
denominator[i][j] += temp/fabs(ErrorSino[k][i][j]);
}
}
num_sum += SinogramPtr->ProjOffset[i][j] + (numerator[i][j]/denominator[i][j]);
den_sum += 1.0/denominator[i][j];
}
/*Lagrange multiplier enforcing the zero-mean (constant-sum) constraint on the offsets.*/
gamma = num_sum/den_sum;
/*Apply the constrained update to the offsets and the error sinogram; reclassify entries.*/
#pragma omp parallel for private(j, k)
for (i = 0; i < SinogramPtr->N_r; i++)
for (j = 0; j < SinogramPtr->N_t; j++)
{
SinogramPtr->ProjOffset[i][j] = SinogramPtr->ProjOffset[i][j] + (numerator[i][j]-gamma)/denominator[i][j];
for (k = 0; k < SinogramPtr->N_p; k++)
{
ErrorSino[k][i][j] -= (numerator[i][j]-gamma)/denominator[i][j];
if (fabs(ErrorSino[k][i][j]*sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh)
SinogramPtr->ProjSelect[k][i][j] = true;
else
SinogramPtr->ProjSelect[k][i][j] = false;
}
}
multifree(numerator,2);
multifree(denominator,2);
}
/*Updates the projection offset error parameter d_i*/
/*Updates the projection offset error parameter d_i with no constraint on the offsets.
Each detector position (r, t) gets an independent closed-form update num/den accumulated
over all projections; the update is then applied to both the offsets and the error
sinogram, and the quadratic/linear region selector 'ProjSelect' is refreshed.*/
void update_d_offset_unconstrained (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino)
{
	Real_t **num, **den, wt_term, sgn;
	int32_t r, t, p;

	num = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
	den = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);

	/*Accumulate the per-detector update terms over the projection dimension.*/
	#pragma omp parallel for private(t, p, wt_term, sgn)
	for (r = 0; r < SinogramPtr->N_r; r++)
	{
		for (t = 0; t < SinogramPtr->N_t; t++)
		{
			num[r][t] = 0;
			den[r][t] = 0;
			for (p = 0; p < SinogramPtr->N_p; p++)
			{
				wt_term = TomoInputsPtr->ErrorSinoThresh*TomoInputsPtr->ErrorSinoDelta*sqrt(TomoInputsPtr->Weight[p][r][t]);
				if (SinogramPtr->ProjSelect[p][r][t] == true)
				{
					/*Quadratic region: standard weighted least-squares terms.*/
					num[r][t] += ErrorSino[p][r][t]*TomoInputsPtr->Weight[p][r][t];
					den[r][t] += TomoInputsPtr->Weight[p][r][t];
				}
				else
				{
					/*Linear region: constant-magnitude gradient with the sign of the error.*/
					sgn = (ErrorSino[p][r][t] > 0) - (ErrorSino[p][r][t] < 0);
					num[r][t] += wt_term*sgn;
					den[r][t] += wt_term/fabs(ErrorSino[p][r][t]);
				}
			}
		}
	}

	/*Apply the offset increment, propagate it into the error sinogram, and reclassify entries.*/
	#pragma omp parallel for private(t, p)
	for (r = 0; r < SinogramPtr->N_r; r++)
	{
		for (t = 0; t < SinogramPtr->N_t; t++)
		{
			SinogramPtr->ProjOffset[r][t] = SinogramPtr->ProjOffset[r][t] + (num[r][t])/den[r][t];
			for (p = 0; p < SinogramPtr->N_p; p++)
			{
				ErrorSino[p][r][t] -= (num[r][t])/den[r][t];
				if (fabs(ErrorSino[p][r][t]*sqrt(TomoInputsPtr->Weight[p][r][t])) < TomoInputsPtr->ErrorSinoThresh)
					SinogramPtr->ProjSelect[p][r][t] = true;
				else
					SinogramPtr->ProjSelect[p][r][t] = false;
			}
		}
	}

	multifree(num,2);
	multifree(den,2);
}
/*Dispatches the projection offset update to the implementation selected by
TomoInputsPtr->OffsetConstraintType:
1 -> unconstrained, 2 -> zero-mean constraint, 3 -> rectangular-patch constraint.
Any other value is a no-op.*/
void update_Sinogram_Offset (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino)
{
	switch (TomoInputsPtr->OffsetConstraintType)
	{
		case 1:
			update_d_offset_unconstrained (SinogramPtr, TomoInputsPtr, ErrorSino);
			break;
		case 2:
			update_d_offset_zero_mean_constraint (SinogramPtr, TomoInputsPtr, ErrorSino);
			break;
		case 3:
			update_d_offset_rect_patch_constraint (SinogramPtr, TomoInputsPtr, ErrorSino);
			break;
	}
}
/*Implements mutithreaded shared memory parallelization using OpenMP and splits work among
threads. Each thread gets a certain time slice and z block to update.
Multithreading is done within the z-blocks assigned to each node.
ErrorSino - Error sinogram
Iter - Present iteration number
MagUpdateMap - Magnitude update map containing the magnitude of update of each voxel
Mask - If a certain element is true then the corresponding voxel is updated*/
int updateVoxelsTimeSlices(Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_arr_t** DetectorResponse, /*AMatrixCol* VoxelLineResponse,*/ Real_arr_t*** ErrorSino, int32_t Iter, Real_arr_t**** MagUpdateMap, uint8_t*** Mask)
{
Real_t AverageUpdate = 0, tempUpdate, avg_update_percentage, total_vox_mag = 0.0, vox_mag = 0.0;
int32_t xy_start, xy_end, i, j, K, block, idx, **z_start, **z_stop;
Real_t tempTotPix = 0, total_pix = 0;
long int **zero_count, total_zero_count = 0;
/*thread_num[i][block] records which OpenMP thread updated each (time slice, z block); used only for debug logging below*/
int32_t** thread_num = (int32_t**)multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks);
MPI_Request *send_reqs, *recv_reqs;
send_reqs = (MPI_Request*)get_spc(ScannedObjectPtr->N_time, sizeof(MPI_Request));
recv_reqs = (MPI_Request*)get_spc(ScannedObjectPtr->N_time, sizeof(MPI_Request));
/*z_start/z_stop hold the inclusive z-slice bounds assigned to each (time slice, z block)*/
z_start = (int32_t**)multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks);
z_stop = (int32_t**)multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks);
/*Randomizes the (x, y) voxel-line visit order within the mask before the sweeps below*/
randomly_select_x_y (ScannedObjectPtr, TomoInputsPtr, Mask);
zero_count = (long int**)multialloc(sizeof(long int), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks);
/* offset_numerator = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t);
memset(&(offset_denominator[0][0]), 0, SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_t));
for (k = 0; k < SinogramPtr->N_p; k++)
for (i = 0; i < SinogramPtr->N_r; i++)
for (j = 0; j < SinogramPtr->N_t; j++)
offset_denominator[i][j] += TomoInputsPtr->Weight[k][i][j]; */
memset(&(zero_count[0][0]), 0, ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*sizeof(long int));
/* K = ScannedObjectPtr->N_time*ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x;
K = (K - total_zero_count)/(ScannedObjectPtr->gamma*K);*/
/*K is the number of sub-iterations; each sub-iteration j updates a 1/K-th chunk of the randomly selected voxel lines*/
K = ScannedObjectPtr->NHICD_Iterations;
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Number of NHICD iterations is %d.\n", K);
for (j = 0; j < K; j++)
{
total_vox_mag = 0.0;
/*First half-sweep: block advances by 2 and idx is offset by the time-slice parity,
so each parallel pass touches alternating z blocks (presumably to keep threads off
adjacent z blocks simultaneously -- NOTE(review): confirm against updateVoxels' halo use)*/
#pragma omp parallel for collapse(2) private(i, block, idx, xy_start, xy_end) reduction(+:total_vox_mag)
for (i = 0; i < ScannedObjectPtr->N_time; i++)
for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2)
{
idx = (i % 2 == 0) ? block: block + 1;
z_start[i][idx] = idx*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks);
z_stop[i][idx] = (idx + 1)*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks) - 1;
/*The last z block absorbs any remainder slices when N_z is not divisible by num_z_blocks*/
z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1: z_stop[i][idx];
/*Sub-iteration j updates voxel lines [xy_start, xy_end] of the random selection; the last sub-iteration absorbs the remainder*/
xy_start = j*floor(TomoInputsPtr->UpdateSelectNum[i][idx]/K);
xy_end = (j + 1)*floor(TomoInputsPtr->UpdateSelectNum[i][idx]/K) - 1;
xy_end = (j == K - 1) ? TomoInputsPtr->UpdateSelectNum[i][idx] - 1: xy_end;
/* printf ("Loop 1 Start - j = %d, i = %d, idx = %d, z_start = %d, z_stop = %d, xy_start = %d, xy_end = %d\n", j, i, idx, z_start[i][idx], z_stop[i][idx], xy_start, xy_end);*/
total_vox_mag += updateVoxels (i, i, z_start[i][idx], z_stop[i][idx], xy_start, xy_end, TomoInputsPtr->x_rand_select[i][idx], TomoInputsPtr->y_rand_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /*VoxelLineResponse,*/ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]);
thread_num[i][idx] = omp_get_thread_num();
}
/*check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Send MPI info\n");*/
/*Overlap communication with computation: start the boundary z-slice exchange,
update the projection offsets while it is in flight, then wait for completion*/
MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0);
/* check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "update_Sinogram_Offset: Will compute projection offset error\n");*/
if (TomoInputsPtr->updateProjOffset > 1)
update_Sinogram_Offset (SinogramPtr, TomoInputsPtr, ErrorSino);
/* check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "update_Sinogram_Offset: Done computing projection offset error\n");*/
MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0);
/*Second half-sweep: complementary parity, covering the z blocks skipped above*/
#pragma omp parallel for collapse(2) private(i, block, idx, xy_start, xy_end) reduction(+:total_vox_mag)
for (i = 0; i < ScannedObjectPtr->N_time; i++)
for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2)
{
idx = (i % 2 == 0) ? block + 1: block;
z_start[i][idx] = idx*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks);
z_stop[i][idx] = (idx + 1)*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks) - 1;
z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1: z_stop[i][idx];
xy_start = j*floor(TomoInputsPtr->UpdateSelectNum[i][idx]/K);
xy_end = (j + 1)*floor(TomoInputsPtr->UpdateSelectNum[i][idx]/K) - 1;
xy_end = (j == K - 1) ? TomoInputsPtr->UpdateSelectNum[i][idx] - 1: xy_end;
total_vox_mag += updateVoxels (i, i, z_start[i][idx], z_stop[i][idx], xy_start, xy_end, TomoInputsPtr->x_rand_select[i][idx], TomoInputsPtr->y_rand_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /*VoxelLineResponse,*/ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]);
thread_num[i][idx] = omp_get_thread_num();
/* printf ("Loop 2 - i = %d, idx = %d, z_start = %d, z_stop = %d, xy_start = %d, xy_end = %d\n", i, idx, z_start[i][idx], z_stop[i][idx], xy_start, xy_end);*/
}
MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1);
if (TomoInputsPtr->updateProjOffset > 1)
update_Sinogram_Offset (SinogramPtr, TomoInputsPtr, ErrorSino);
MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1);
/*Select the voxel lines with the largest update magnitudes for the NHICD passes below*/
VSC_based_Voxel_Line_Select(ScannedObjectPtr, TomoInputsPtr, MagUpdateMap);
/* check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Number of NHICD voxel lines to be updated in iteration %d is %d\n", j, num_voxel_lines);*/
/*NHICD (non-homogeneous ICD) passes: re-update only the selected high-magnitude voxel
lines. Skipped on the first iteration and when NHICD is disabled.*/
if (Iter > 1 && TomoInputsPtr->no_NHICD == 0)
{
#pragma omp parallel for collapse(2) private(i, block, idx)
for (i = 0; i < ScannedObjectPtr->N_time; i++)
for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2)
{
idx = (i % 2 == 0) ? block: block + 1;
z_start[i][idx] = idx*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks);
z_stop[i][idx] = (idx + 1)*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks) - 1;
z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1: z_stop[i][idx];
updateVoxels (i, i, z_start[i][idx], z_stop[i][idx], 0, TomoInputsPtr->NHICDSelectNum[i][idx]-1, TomoInputsPtr->x_NHICD_select[i][idx], TomoInputsPtr->y_NHICD_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /*VoxelLineResponse,*/ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]);
thread_num[i][idx] = omp_get_thread_num();
/* printf ("Loop 1 NHICD - i = %d, idx = %d, z_start = %d, z_stop = %d\n", i, idx, z_start[i][idx], z_stop[i][idx]);*/
}
MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0);
if (TomoInputsPtr->updateProjOffset > 1)
update_Sinogram_Offset (SinogramPtr, TomoInputsPtr, ErrorSino);
MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0);
#pragma omp parallel for collapse(2) private(i, block, idx)
for (i = 0; i < ScannedObjectPtr->N_time; i++)
for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2)
{
idx = (i % 2 == 0) ? block + 1: block;
z_start[i][idx] = idx*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks);
z_stop[i][idx] = (idx + 1)*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks) - 1;
z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1: z_stop[i][idx];
updateVoxels (i, i, z_start[i][idx], z_stop[i][idx], 0, TomoInputsPtr->NHICDSelectNum[i][idx]-1, TomoInputsPtr->x_NHICD_select[i][idx], TomoInputsPtr->y_NHICD_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /*VoxelLineResponse,*/ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]);
thread_num[i][idx] = omp_get_thread_num();
/* printf ("Loop 2 NHICD - i = %d, idx = %d, z_start = %d, z_stop = %d\n", i, idx, z_start[i][idx], z_stop[i][idx]);*/
}
MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1);
if (TomoInputsPtr->updateProjOffset > 1)
update_Sinogram_Offset (SinogramPtr, TomoInputsPtr, ErrorSino);
MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1);
}
}
if (TomoInputsPtr->updateVar == 1)
update_variance_parameter (SinogramPtr, TomoInputsPtr, ErrorSino);
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Time Slice, Z Start, Z End - Thread : ");
/*Accumulate per-node statistics: total updated voxel count, summed update magnitude,
and zero-update count, then log the per-block thread assignment*/
total_pix = 0;
for (i=0; i<ScannedObjectPtr->N_time; i++){
for (block=0; block<TomoInputsPtr->num_z_blocks; block++){
total_pix += TomoInputsPtr->UpdateSelectNum[i][block]*(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks);
for (j=0; j<TomoInputsPtr->UpdateSelectNum[i][block]; j++){
AverageUpdate += MagUpdateMap[i][block][TomoInputsPtr->y_rand_select[i][block][j]][TomoInputsPtr->x_rand_select[i][block][j]];
}
total_zero_count += zero_count[i][block];
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "%d,%d,%d-%d; ", i, z_start[i][block], z_stop[i][block], thread_num[i][block]);
}
}
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "\n");
/*Reduce the convergence statistics across all MPI ranks*/
MPI_Allreduce(&AverageUpdate, &tempUpdate, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&total_pix, &tempTotPix, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(&total_vox_mag, &vox_mag, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
AverageUpdate = tempUpdate/(tempTotPix);
AverageUpdate = convert2Hounsfield(AverageUpdate);
check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Average voxel update over all voxels is %f, total voxels is %f.\n", AverageUpdate, tempTotPix);
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Zero count is %ld.\n", total_zero_count);
check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Variance parameter divisor is %f.\n", (Real_t)TomoInputsPtr->node_num*(Real_t)SinogramPtr->N_p*(Real_t)SinogramPtr->N_r*(Real_t)SinogramPtr->N_t);
multifree(zero_count,2);
multifree(thread_num,2);
multifree(z_start,2);
multifree(z_stop,2);
free(send_reqs);
free(recv_reqs);
/* multifree(offset_numerator,2);
multifree(offset_denominator,2);*/
/*Converged when the summed update magnitude falls below StopThreshold percent of the
total voxel magnitude; returns 1 on convergence, 0 otherwise*/
avg_update_percentage = 100*tempUpdate/vox_mag;
check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Percentage average magnitude of voxel updates is %f.\n", avg_update_percentage);
if (avg_update_percentage < TomoInputsPtr->StopThreshold)
{
check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Percentage average magnitude of voxel updates is less than convergence threshold.\n");
return (1);
}
return(0);
}
/*ICD_BackProject calls the ICD optimization function repeatedly till the stopping criteria is met.
Per iteration it runs updateVoxelsTimeSlices, optionally writes intermediate results, and
(unless NO_COST_CALCULATE is defined) recomputes and logs the cost.
Returns 0 on success and -1 if any initialization or file write fails.*/
int ICD_BackProject(Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr)
{
#ifndef NO_COST_CALCULATE
    Real_t cost, cost_0_iter, cost_last_iter, percentage_change_in_cost = 0;
    char costfile[100]=COST_FILENAME;
#endif
    Real_arr_t ***ErrorSino, **H_r, *H_t;
    Real_t x, y;
    int32_t j, flag = 0, Iter, i, k;
    int dimTiff[4];
    char VarEstFile[100] = VAR_PARAM_FILENAME;
    char scaled_error_file[100] = SCALED_ERROR_SINO_FILENAME;
    time_t start;
    char detect_file[100] = DETECTOR_RESPONSE_FILENAME;
    char projselect_file[100] = PROJ_SELECT_FILENAME;
    char MagUpdateMapFile[100] = MAG_UPDATE_FILENAME;
    uint8_t ***Mask;
    /*AMatrixCol *VoxelLineResponse;*/
#ifdef POSITIVITY_CONSTRAINT
    check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Enforcing positivity constraint\n");
#endif
    Real_arr_t**** MagUpdateMap = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x);
    H_r = (Real_arr_t **)multialloc(sizeof(Real_arr_t), 2, SinogramPtr->N_p, DETECTOR_RESPONSE_BINS + 1);
    H_t = (Real_arr_t *)get_spc(DETECTOR_RESPONSE_BINS + 1, sizeof(Real_arr_t));
    ErrorSino = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinogramPtr->N_p, SinogramPtr->N_r, SinogramPtr->N_t);
    Mask = (uint8_t***)multialloc(sizeof(uint8_t), 3, ScannedObjectPtr->N_time, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x);
    memset(&(MagUpdateMap[0][0][0][0]), 0, ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x*sizeof(Real_arr_t));
    /* omp_set_num_threads(TomoInputsPtr->num_threads);*/
    check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Number of CPU cores is %d\n", (int)omp_get_num_procs());
    /* check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "ICD_BackProject: Number of threads is %d\n", TomoInputsPtr->num_threads) ;*/
    /*Restrict updates to voxels inside the circular reconstruction region of radius radius_obj,
    centered by comparing each voxel center (x, y) against the origin*/
    for (i = 0; i < ScannedObjectPtr->N_time; i++)
        for (j = 0; j < ScannedObjectPtr->N_y; j++)
            for (k = 0; k < ScannedObjectPtr->N_x; k++){
                x = ScannedObjectPtr->x0 + ((Real_t)k + 0.5)*ScannedObjectPtr->delta_xy;
                y = ScannedObjectPtr->y0 + ((Real_t)j + 0.5)*ScannedObjectPtr->delta_xy;
                if (x*x + y*y < TomoInputsPtr->radius_obj*TomoInputsPtr->radius_obj)
                    Mask[i][j][k] = 1;
                else
                    Mask[i][j][k] = 0;
            }
    DetectorResponseProfile (H_r, H_t, SinogramPtr, ScannedObjectPtr, TomoInputsPtr);
    dimTiff[0] = 1; dimTiff[1] = 1; dimTiff[2] = SinogramPtr->N_p; dimTiff[3] = DETECTOR_RESPONSE_BINS+1;
    /*Append the node-rank suffix in place. The previous code passed the destination buffer
    as a "%s" source argument of sprintf, which is undefined behavior when source and
    destination overlap; appending at the current string length avoids the overlap.*/
    sprintf(detect_file + strlen(detect_file), "_n%d", TomoInputsPtr->node_rank);
    if (TomoInputsPtr->Write2Tiff == 1)
        if (WriteMultiDimArray2Tiff (detect_file, dimTiff, 0, 1, 2, 3, &(H_r[0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error;
    start = time(NULL);
    if (initObject(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, MagUpdateMap)) goto error;
    check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Time taken to read object = %fmins\n", difftime(time(NULL),start)/60.0);
    if (initErrorSinogam(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, H_r, ErrorSino/*, VoxelLineResponse*/)) goto error;
    check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Time taken to initialize object and compute error sinogram = %fmins\n", difftime(time(NULL),start)/60.0);
#ifndef NO_COST_CALCULATE
    /*Baseline cost before the first iteration; used for the percentage-change metric below*/
    cost = computeCost(SinogramPtr,ScannedObjectPtr,TomoInputsPtr,ErrorSino);
    cost_0_iter = cost;
    cost_last_iter = cost;
    check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "------------- Iteration 0, Cost = %f------------\n",cost);
    if (TomoInputsPtr->node_rank == 0)
        Write2Bin (costfile, 1, 1, 1, 1, sizeof(Real_t), &cost, TomoInputsPtr->debug_file_ptr);
#endif /*Cost calculation endif*/
    start=time(NULL);
    /*Main ICD loop. 'flag' is 1 once updateVoxelsTimeSlices reports convergence.*/
    for (Iter = 1; Iter <= TomoInputsPtr->NumIter; Iter++)
    {
        flag = updateVoxelsTimeSlices (SinogramPtr, ScannedObjectPtr, TomoInputsPtr, H_r, /*VoxelLineResponse,*/ ErrorSino, Iter, MagUpdateMap, Mask);
        if (TomoInputsPtr->WritePerIter == 1)
            if (write_ObjectProjOff2TiffBinPerIter (SinogramPtr, ScannedObjectPtr, TomoInputsPtr)) goto error;
#ifndef NO_COST_CALCULATE
        cost = computeCost(SinogramPtr,ScannedObjectPtr,TomoInputsPtr,ErrorSino);
        /*NOTE(review): denominator is zero if the cost has not moved from its initial value*/
        percentage_change_in_cost = ((cost - cost_last_iter)/(cost - cost_0_iter))*100.0;
        check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Percentage change in cost is %f.\n", percentage_change_in_cost);
        check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Variance parameter estimate = %f.\n", TomoInputsPtr->var_est);
        check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "------------- Iteration = %d, Cost = %f, Time since start of ICD = %fmins ------------\n",Iter,cost,difftime(time(NULL),start)/60.0);
        if (TomoInputsPtr->node_rank == 0)
            Append2Bin (costfile, 1, 1, 1, 1, sizeof(Real_t), &cost, TomoInputsPtr->debug_file_ptr);
        /*ICD should decrease the cost monotonically; an increase indicates a bug or numerical issue*/
        check_error(cost > cost_last_iter, TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Cost value increased.\n");
        cost_last_iter = cost;
        /*if (percentage_change_in_cost < TomoInputsPtr->cost_thresh && flag != 0 && Iter > 1){*/
        if (flag != 0 && Iter > 1){
            check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Convergence criteria is met.\n");
            break;
        }
#else
        check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Variance parameter estimate = %f\n",TomoInputsPtr->var_est);
        check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "-------------ICD_BackProject: ICD Iter = %d, time since start of ICD = %fmins------------.\n",Iter,difftime(time(NULL),start)/60.0);
        if (flag != 0 && Iter > 1){
            check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Convergence criteria is met.\n");
            break;
        }
#endif
        flag = fflush(TomoInputsPtr->debug_file_ptr);
        if (flag != 0)
            check_warn(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Cannot flush buffer.\n");
    }
    /*Scale the error sinogram by sqrt(Weight) so the written file holds the weighted residual*/
    for (i = 0; i < SinogramPtr->N_p; i++)
        for (j = 0; j < SinogramPtr->N_r; j++)
            for (k = 0; k < SinogramPtr->N_t; k++)
                ErrorSino[i][j][k] *= sqrt(TomoInputsPtr->Weight[i][j][k]);
    if (TomoInputsPtr->node_rank == 0)
        Write2Bin (VarEstFile, 1, 1, 1, 1, sizeof(Real_t), &(TomoInputsPtr->var_est), TomoInputsPtr->debug_file_ptr);
    int32_t size = ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x;
    if (write_SharedBinFile_At (MagUpdateMapFile, &(MagUpdateMap[0][0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) goto error;
    /*Same in-place suffix append as for detect_file above (avoids overlapping-sprintf UB)*/
    sprintf(scaled_error_file + strlen(scaled_error_file), "_n%d", TomoInputsPtr->node_rank);
    sprintf(projselect_file + strlen(projselect_file), "_n%d", TomoInputsPtr->node_rank);
    dimTiff[0] = 1; dimTiff[1] = SinogramPtr->N_p; dimTiff[2] = SinogramPtr->N_r; dimTiff[3] = SinogramPtr->N_t;
    if (TomoInputsPtr->Write2Tiff == 1)
    {
        if (WriteMultiDimArray2Tiff (scaled_error_file, dimTiff, 0, 3, 1, 2, &(ErrorSino[0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error;
        if (WriteBoolArray2Tiff (projselect_file, dimTiff, 0, 3, 1, 2, &(SinogramPtr->ProjSelect[0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error;
    }
    multifree(ErrorSino,3);
    multifree(H_r,2);
    free(H_t);
    multifree(Mask,3);
    multifree(MagUpdateMap, 4);
    check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Finished running ICD_BackProject.\n");
    flag = fflush(TomoInputsPtr->debug_file_ptr);
    if (flag != 0 )
        check_warn(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Cannot flush buffer.\n");
    check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "The estimated value of variance parameter is %f.\n", TomoInputsPtr->var_est);
    return(0);
error:
    /*Shared cleanup path for any failed initialization or file write*/
    multifree(ErrorSino,3);
    multifree(H_r,2);
    free(H_t);
    multifree(Mask,3);
    multifree(MagUpdateMap, 4);
    return(-1);
}
|
vow.c | /********************************************************************************************
* SIDH: an efficient supersingular isogeny cryptography library
*
* Abstract: functions for van Oorschot-Wiener attack
*********************************************************************************************/
#include <omp.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <signal.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include "prng.h"
#include "../tests/test_extras.h"
#include "triples.h"
#include "sync_strategies.c"
#include "benchmarking.c"
// Print statements for debugging
// Dump a state's bytes as lowercase hex on stdout; a NULL byte buffer is
// rendered as "--" per byte so missing states stay visually aligned.
void print_st(st_t *s, shared_state_t *shared_state)
{
    uint64_t pos = 0;

    if (s->bytes == NULL) {
        while (pos < shared_state->NBYTES_STATE) {
            printf("--");
            pos++;
        }
        return;
    }
    while (pos < shared_state->NBYTES_STATE) {
        printf("%02x", s->bytes[pos]);
        pos++;
    }
}
/**
* @brief runs one "iteration" of vOW: sampling a point, checking for distinguishedness and possibly backtracking
*
* @param S shared state pointer
* @param private_state private state pointer
* @param t temporary triple pointer
* @param success pointer to global success variable
* @return true vOW terminated, break out of loop
* @return false keep looping
*/
static inline bool vOW_one_iteration(
    shared_state_t *S,
    private_state_t *private_state,
    trip_t *t,
    bool *success,
    double ratio_of_points_to_mine)
{
    // printf("mine a point (1 vow iteration)\n");
    // Walk to the next point using the current random function
    update(private_state);
    private_state->current.current_steps += 1;
    // Check if the new point is distinguished
    if (distinguished(private_state))
    {
        // Found a distinguished point. Try backtracking; if unsuccessful, sample a new starting point
        uint64_t id;
        bool read;
        bool res;
        private_state->current_dist++;
        private_state->dist_points++; // S->current_dist gets reset, this doesn't
        // Look up the memory slot this distinguished point hashes to
        id = mem_index(private_state);
        copy_trip(&private_state->trip, &S->memory[id], private_state->NWORDS_STATE);
        // A stored triple with zero steps means the slot was never written
        read = (private_state->trip.current_steps > 0);
        // Did not get a collision in value, hence it was just a memory address collision
        if (!read || !is_equal_st(&private_state->trip.current_state, &private_state->current.current_state, private_state->NWORDS_STATE)) {
            private_state->mem_collisions += 1;
        } else {
            // Not a simple memory collision, backtrack!
            // printf("Collision ");
            // print_st(&private_state->trip.current_state, S);
            // printf("\n");
            // Backtrack from the stored triple and the current one to locate the colliding step
            copy_trip(t, &private_state->current, private_state->NWORDS_STATE);
            res = backtrack(&private_state->trip, t, S, private_state);
            // Only check for success when not running for stats
            if (!private_state->collect_vow_stats) {
                // Report success if this thread found the golden collision, or another thread already did
                if (res || *success) {
                    *success = true;
                    return true;
                }
            }
        }
        // Didn't get the golden collision, write the current distinguished point to memory
        // and sample a new starting point
        write_to_memory(&private_state->current, S, private_state, id);
        sample(private_state);
    }
    // Check if enough points have been mined for the current random function
    // (ratio_of_points_to_mine scales the per-core quota from benchmarking)
    if (private_state->current_dist >= private_state->MAX_DIST * ratio_of_points_to_mine) {
        // Enough points collected for this random function
        if (!private_state->collect_vow_stats) {
            // Exactly one of the three synchronization strategies is compiled in
#if defined(STAKHANOVIST_SYNC)
            if (stakhanovist_resync_should_resync(S, private_state)) {
                sample(private_state);
                update_random_function(S, private_state);
                stakhanovist_resync_do_resync(S, private_state);
            }
#elif defined(WINDOWED_SYNC)
            // In real attack. Sample a new starting point and random function
            sample(private_state);
            update_random_function(S, private_state);
            private_state->random_functions++; // maybe this could be merged with update_random_function
            private_state->current_dist = 0;
#elif defined(NOBIGGIE_SYNC)
            if (nobiggie_resync_should_resync(S, private_state, success)) {
                // Resync, no thread has found the solution in this function version, so barriers inside this scope would be hit by all
                nobiggie_resync_do_resync(S, private_state);
                // Wait for 0 to reset S->resync_cores inside resync
#pragma omp barrier
            } else {
                // Some core found the solution while waiting
                return true;
            }
#endif
        } else {
            // we are collecting stats only for one random function, can stop vOW
            return true;
        }
    }
    if (private_state->current.current_steps >= private_state->MAX_STEPS) {
        // Walked too long without finding a new distinguished point
        // hence, sample a new starting point
        sample(private_state);
    }
    return false;
}
#if (OS_TARGET == OS_LINUX)
// Handle Ctrl+C to stop prematurely and collect statistics.
// Written from an asynchronous signal handler, so the flag must be
// volatile sig_atomic_t: that is the only object type the C standard
// guarantees can be safely stored to from a signal handler (a plain
// bool is formally undefined behavior and may be cached by the compiler).
volatile sig_atomic_t ctrl_c_pressed = 0;
void sigintHandler(int sig_num)
{
    /* Refer http://en.cppreference.com/w/c/program/signal */
    (void)sig_num; // unused; required by the signal handler signature
    ctrl_c_pressed = 1;
}
#endif
// Top-level driver for the van Oorschot-Wiener golden-collision search.
// Benchmarks the cores to apportion work, then runs vOW_one_iteration in an
// OpenMP team of S->N_OF_CORES threads until success, exhaustion of random
// function versions, or Ctrl+C. Aggregated statistics and timings are written
// back into *S. Returns true iff the golden collision was found.
bool vOW(shared_state_t *S)
{
    bool success = false;
    double start_wall_time = omp_get_wtime();
    double *points_ratio = NULL;
    // Cycle counter: negate now, add the end value later to get the elapsed delta
    S->cpu_cycles = -cpu_cycles();
    // Explicitly disable dynamic teams (ensures running on S->N_OF_CORES cores)
    omp_set_dynamic(0);
    // Runs cores benchmarks (across remote machines if used) to allocate work
    points_ratio = (double *)malloc(S->N_OF_CORES * sizeof(double));
    if (points_ratio == NULL) {
        fprintf(stderr, "error: could not alloc points_ratio memory");
        goto end;
    }
    run_benchmark(points_ratio, S->instance, 5000);
    // Runs the real attack
#pragma omp parallel num_threads(S->N_OF_CORES)
    {
        // Each thread owns a private state; shared state S is only touched
        // through the helpers and the critical section below
        private_state_t private_state;
        init_private_state(S, &private_state);
        // Per-thread share of the distinguished-point quota, from the benchmark
        double ratio_of_points_to_mine = points_ratio[private_state.thread_id];
        double internal_cpu_time = omp_get_wtime();
        initialize_private_memory(S, &private_state);
        // Scratch triple reused by backtracking inside vOW_one_iteration
        trip_t t = init_trip(private_state.NWORDS_STATE);
#if (OS_TARGET == OS_LINUX)
        // Set a Ctrl+C handler to dump statistics
        signal(SIGINT, sigintHandler);
#endif
        // while we haven't exhausted the random functions to try
        while (private_state.random_functions <= private_state.MAX_FUNCTION_VERSIONS && !success)
        {
#if (OS_TARGET == OS_LINUX)
            if (ctrl_c_pressed) {
                printf("\n%d: thinks ctrl+c was pressed", private_state.thread_id);
                break;
            }
#endif
#if defined(WINDOWED_SYNC)
            // "Windowed" resync
            windowed_resync(S, &private_state);
#endif
            // Mine new points; returns true when the loop should terminate
            if (vOW_one_iteration(S, &private_state, &t, &success, ratio_of_points_to_mine)) {
                break;
            }
        }
        internal_cpu_time = omp_get_wtime() - internal_cpu_time;
        // Collect all the stats from each thread (serialized to avoid races on S)
#pragma omp critical
        {
            S->collisions += private_state.collisions;
            S->mem_collisions += private_state.mem_collisions;
            S->dist_points += private_state.dist_points;
            S->number_steps_collect += private_state.number_steps_collect;
            S->number_steps_locate += private_state.number_steps_locate;
            S->number_steps = S->number_steps_collect + S->number_steps_locate;
            S->total_time += internal_cpu_time;
            S->final_avg_random_functions += (double)private_state.random_functions / (double)S->N_OF_CORES;
        }
        free_trip(&t);
        cleanup_private_memory(&private_state);
        free_private_state(&private_state);
    }
end:
#if (OS_TARGET == OS_LINUX)
    // Reset so a later vOW() call is not aborted by a stale flag
    ctrl_c_pressed = false;
#endif
    S->cpu_cycles += cpu_cycles();
    free(points_ratio);
    S->success = success;
    S->wall_time = omp_get_wtime() - start_wall_time;
    return success;
}
|
abstract_pivot_column.h | /* Copyright 2013 IST Austria
Contributed by: Ulrich Bauer, Michael Kerber, Jan Reininghaus
This file is part of PHAT.
PHAT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PHAT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PHAT. If not, see <http://www.gnu.org/licenses/>. */
#pragma once
#include "../helpers/misc.h"
#include "../representations/vector_vector.h"
namespace phat {
// Note: We could even make the rep generic in the underlying Const representation
// But I cannot imagine that anything else than vector<vector<index>> would
// make sense
// Base representation for pivot-column-based reduction algorithms.
// Wraps vector_vector and caches one "pivot column" per thread in a faster
// column type (PivotColumn); all column operations are transparently routed to
// the cached copy while a column is the active pivot.
template< typename PivotColumn >
class abstract_pivot_column : public vector_vector {
protected:
    typedef vector_vector Base;
    typedef PivotColumn pivot_col;
    // For parallelization purposes, it could be more than one full column
    // (one cached pivot column and its index per thread)
    mutable thread_local_storage< pivot_col > pivot_cols;
    mutable thread_local_storage< index > idx_of_pivot_cols;
    // Returns the calling thread's cached pivot column.
    pivot_col& get_pivot_col() const {
        return pivot_cols();
    }
    // True iff idx is the calling thread's current pivot column.
    bool is_pivot_col( index idx ) const {
        return idx_of_pivot_cols() == idx;
    }
    // Flushes the cached pivot column back into the underlying matrix
    // (if any) and marks the thread as having no active pivot (-1).
    void release_pivot_col() {
        index idx = idx_of_pivot_cols();
        if( idx != -1 ) {
            this->matrix[ idx ].clear();
            pivot_cols().get_col_and_clear( this->matrix[ idx ] );
        }
        idx_of_pivot_cols() = -1;
    }
    // Makes idx the active pivot: releases the previous one and loads
    // column idx from the matrix into the cached pivot column.
    void make_pivot_col( index idx ) {
        release_pivot_col();
        idx_of_pivot_cols() = idx;
        get_pivot_col().add_col( matrix[ idx ] );
    }
public:
    // Sizes the per-thread pivot caches and resets their indices, then
    // delegates to the base representation.
    void _set_num_cols( index nr_of_cols ) {
#pragma omp parallel for
        for( int tid = 0; tid < omp_get_num_threads(); tid++ ) {
            pivot_cols[ tid ].init( nr_of_cols );
            idx_of_pivot_cols[ tid ] = -1;
        }
        Base::_set_num_cols( nr_of_cols );
    }
    // Adds column 'source' to column 'target', promoting 'target' to the
    // cached pivot column first if it is not already.
    void _add_to( index source, index target ) {
        if( !is_pivot_col( target ) )
            make_pivot_col( target );
        get_pivot_col().add_col( matrix[source] );
    }
    // Writes every thread's cached pivot column back to the matrix.
    void _sync() {
#pragma omp parallel for
        for( int tid = 0; tid < omp_get_num_threads(); tid++ )
            release_pivot_col();
    }
    // The accessors below route to the cached pivot column when idx is the
    // active pivot, and to the base representation otherwise.
    void _get_col( index idx, column& col ) const { is_pivot_col( idx ) ? get_pivot_col().get_col( col ) : Base::_get_col( idx, col ); }
    bool _is_empty( index idx ) const { return is_pivot_col( idx ) ? get_pivot_col().is_empty() : Base::_is_empty( idx ); }
    index _get_max_index( index idx ) const { return is_pivot_col( idx ) ? get_pivot_col().get_max_index() : Base::_get_max_index( idx ); }
    void _clear( index idx ) { is_pivot_col( idx ) ? get_pivot_col().clear() : Base::_clear( idx ); }
    void _set_col( index idx, const column& col ) { is_pivot_col( idx ) ? get_pivot_col().set_col( col ) : Base::_set_col( idx, col ); }
    void _remove_max( index idx ) { is_pivot_col( idx ) ? get_pivot_col().remove_max() : Base::_remove_max( idx ); }
    void finalize( index idx ) { Base::_finalize( idx ); }
};
}
|
FindStartIndexWorklet.h | //============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2014 UT-Battelle, LLC.
// Copyright 2014 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_find_start_index_worklet_h
#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_find_start_index_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace mesh_dem_contourtree_mesh_inc
{
// Worklet that computes, for each "from" vertex, the offset into the sorted
// neighbours array at which that vertex's incident edges begin, writing the
// result into firstNeighbour. One instance runs per entry of the (already
// sorted) neighbours array; an entry claims the start index whenever its
// from-vertex differs from the previous entry's from-vertex.
class FindStartIndexWorklet : public vtkm::worklet::WorkletMapField
{
public:
typedef void ControlSignature(WholeArrayIn neighbours, // (input) neighbours
WholeArrayIn arcs, // (input) arcs
WholeArrayOut firstNeighbour); // (output) firstNeighbours
typedef void ExecutionSignature(_1, InputIndex, _2, _3);
typedef _1 InputDomain;
// Default Constructor
VTKM_EXEC_CONT
FindStartIndexWorklet() {}
// Per-entry operator.
// neighboursPortal: sorted encoded neighbour entries. Each entry encodes an
//   (arc, direction) pair: for an even value the from-vertex is value/2;
//   for an odd value the from-vertex is maskedIndex(arcs[value/2])
//   (presumably the "other end" of the arc — confirm against the paper's
//   edge encoding).
// sortedArcNo: index of this entry in the sorted neighbours array.
// arcsPortal: contour-tree arcs, indexed by arc number.
// firstNeighbourPortal: output; slot for a from-vertex is set to the first
//   sortedArcNo whose from-vertex equals it.
template <typename InFieldPortalType, typename OutFieldPortalType>
VTKM_EXEC void operator()(const InFieldPortalType& neighboursPortal,
vtkm::Id sortedArcNo,
const InFieldPortalType& arcsPortal,
const OutFieldPortalType& firstNeighbourPortal) const
{
if (sortedArcNo > 0)
{
// Decode the from-vertex of the previous and current entries; a change
// between consecutive entries marks the start of a new vertex's run.
vtkm::Id prevFrom = (neighboursPortal.Get(sortedArcNo - 1) % 2 == 0)
? neighboursPortal.Get(sortedArcNo - 1) / 2
: maskedIndex(arcsPortal.Get(neighboursPortal.Get(sortedArcNo - 1) / 2));
vtkm::Id currFrom = (neighboursPortal.Get(sortedArcNo) % 2 == 0)
? neighboursPortal.Get(sortedArcNo) / 2
: maskedIndex(arcsPortal.Get(neighboursPortal.Get(sortedArcNo) / 2));
if (currFrom != prevFrom)
{
firstNeighbourPortal.Set(currFrom, sortedArcNo);
}
}
else // sortedArcNo == 0
{
// The first sorted entry always starts the run of vertex 0's neighbours.
firstNeighbourPortal.Set(0, 0);
}
// In serial this worklet implements the following operation
// #pragma omp parallel for
// for (indexVector::size_type sortedArcNo = 1; sortedArcNo < neighbours.size(); ++sortedArcNo)
// {
// indexType prevFrom = (neighbours[sortedArcNo-1] % 2 == 0) ? neighbours[sortedArcNo-1]/2 : maskedIndex(arcs[neighbours[sortedArcNo-1]/2]);
// indexType currFrom = (neighbours[sortedArcNo ] % 2 == 0) ? neighbours[sortedArcNo ]/2 : maskedIndex(arcs[neighbours[sortedArcNo ]/2]);
// if (currFrom != prevFrom)
// {
// assert(currFrom < firstNeighbour.size());
// firstNeighbour[currFrom] = sortedArcNo;
// }
// }
}
}; // FindStartIndexWorklet
} // namespace mesh_dem_contourtree_mesh_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif
|
mpncra.c | /* $Header$ */
/* This single source file may be called as three separate executables:
ncra -- netCDF record averager
nces -- netCDF ensemble statistics
ncrcat -- netCDF record concatenator */
/* Purpose: Compute averages or extract series of specified hyperslabs of
specfied variables of multiple input netCDF files and output them
to a single file. */
/* Copyright (C) 1995--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License.
You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits
libraries and to distribute the resulting executables under the terms
of the BSD, but in addition obeying the extra stipulations of the
HDF, netCDF, OPeNDAP, and UDUnits licenses.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the 3-Clause BSD License for more details.
The original author of this software, Charlie Zender, seeks to improve
it with your suggestions, contributions, bug-reports, and patches.
Please contact the NCO project at http://nco.sf.net or write to
Charlie Zender
Department of Earth System Science
University of California, Irvine
Irvine, CA 92697-3100 */
/* Usage:
ncra -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc
ncra -n 3,4,1 -p ${HOME}/nco/data -l ${HOME} h0001.nc ~/foo.nc
ncra -n 3,4,1 -p /ZENDER/tmp -l ${HOME}/nco/data h0001.nc ~/foo.nc
scp ~/nco/src/nco/ncra.c esmf.ess.uci.edu:nco/src/nco
nces in.nc in.nc ~/foo.nc
nces -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc
nces -n 3,4,1 -p ${HOME}/nco/data -l ${HOME} h0001.nc ~/foo.nc
nces -n 3,4,1 -p /ZENDER/tmp -l ${HOME} h0001.nc ~/foo.nc */
#ifdef HAVE_CONFIG_H
# include <config.h> /* Autotools tokens */
#endif /* !HAVE_CONFIG_H */
/* Standard C headers */
#include <assert.h> /* assert() debugging macro */
#include <math.h> /* sin cos cos sin 3.14159 */
#include <stdio.h> /* stderr, FILE, NULL, etc. */
#include <stdlib.h> /* atof, atoi, malloc, getopt */
#include <string.h> /* strcmp() */
#include <sys/stat.h> /* stat() */
#include <time.h> /* machine time */
#include <unistd.h> /* POSIX stuff */
#ifndef HAVE_GETOPT_LONG
# include "nco_getopt.h"
#else /* HAVE_GETOPT_LONG */
# ifdef HAVE_GETOPT_H
# include <getopt.h>
# endif /* !HAVE_GETOPT_H */
#endif /* HAVE_GETOPT_LONG */
/* Internationalization i18n, Linux Journal 200211 p. 57--59 */
#ifdef I18N
#include <libintl.h> /* Internationalization i18n */
#include <locale.h> /* Locale setlocale() */
#define _(sng) gettext (sng)
#define gettext_noop(sng) (sng)
#define N_(sng) gettext_noop(sng)
#endif /* I18N */
#ifndef _LIBINTL_H
# define gettext(foo) foo
#endif /* _LIBINTL_H */
/* 3rd party vendors */
#include <netcdf.h> /* netCDF definitions and C library */
#ifdef ENABLE_MPI
#include <mpi.h> /* MPI definitions */
#include "nco_mpi.h" /* MPI utilities */
#endif /* !ENABLE_MPI */
/* Personal headers */
/* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */
#define MAIN_PROGRAM_FILE
#include "libnco.h" /* netCDF Operator (NCO) library */
#ifdef ENABLE_MPI
/* Purpose: Simple MPI checkpoint/sanity barrier for debugging.
   The manager rank (rnk_mgr) fills a two-element buffer with the current
   stage number and broadcasts it; every rank blocks in MPI_Bcast() until the
   message arrives, so all processes continue from the same point. Worker
   ranks then verify both buffer slots carry the expected stage, catching
   out-of-step ranks early.
   prc_rnk: this process's MPI rank
   stage: caller-chosen checkpoint identifier, echoed to stderr */
void checkpointMpi(int prc_rnk, int stage){
  int msg[]={0,0}; /* [bfr] Two copies of stage; redundancy for sanity check */
  int rcd; /* [rcd] Return code */
  FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */
  if(prc_rnk == rnk_mgr){
    /* Only the manager seeds the buffer; workers receive it via broadcast */
    msg[0]=stage;
    msg[1]=stage;
  } /* endif */
  (void)fprintf(fp_stderr,"%d checkpointing at stage %d\n",prc_rnk,stage);
  /* make everyone continue from this point. */
  rcd=MPI_Bcast(msg,2,MPI_INT,rnk_mgr,MPI_COMM_WORLD);
  /* Fix: rcd was previously assigned but never examined; fail fast if the
     broadcast itself reports an error rather than asserting on stale data */
  assert(rcd == MPI_SUCCESS);
  if(prc_rnk != rnk_mgr) {
    /* basic sanity check */
    assert(msg[0] == stage);
    assert(msg[1] == stage);
  } /* end if */
} /* end checkpointMpi() */
#endif /* !ENABLE_MPI */
int
main(int argc,char **argv)
{
char **fl_lst_abb=NULL; /* Option n */
char **fl_lst_in;
char **gaa_arg=NULL; /* [sng] Global attribute arguments */
char **var_lst_in=NULL_CEWI;
char *cmd_ln;
char *cnk_arg[NC_MAX_DIMS];
char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */
char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */
char *fl_in=NULL;
char *fl_out=NULL; /* Option o */
char *fl_out_tmp=NULL_CEWI;
char *fl_pth=NULL; /* Option p */
char *fl_pth_lcl=NULL; /* Option l */
char *lmt_arg[NC_MAX_DIMS];
char *nco_op_typ_sng=NULL_CEWI; /* [sng] Operation type Option y */
char *nco_pck_plc_sng=NULL_CEWI; /* [sng] Packing policy Option P */
char *opt_crr=NULL; /* [sng] String representation of current long-option name */
char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */
char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
const char * const CVS_Id="$Id$";
const char * const CVS_Revision="$Revision$";
const char * const opt_sht_lst="34567ACcD:d:FHhL:l:n:Oo:p:P:rRSt:v:xY:y:-:";
dmn_sct **dim;
dmn_sct **dmn_out;
extern char *optarg;
extern int optind;
/* Using naked stdin/stdout/stderr in parallel region generates warning
Copy appropriate filehandle to variable scoped shared in parallel clause */
FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */
int *in_id_arr;
int abb_arg_nbr=0;
int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */
int cnk_nbr=0; /* [nbr] Number of chunk sizes */
int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int fl_idx;
int fl_nbr=0;
int fl_in_fmt; /* [enm] Input file format */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int gaa_nbr=0; /* [nbr] Number of global attributes to add */
int idx=int_CEWI;
int in_id;
int lmt_nbr=0; /* Option d. NB: lmt_nbr gets incremented */
int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */
int md_open; /* [enm] Mode flag for nc_open() call */
int nbr_dmn_fl;
int nbr_dmn_xtr;
int nbr_var_fix; /* nbr_var_fix gets incremented */
int nbr_var_fl;
int nbr_var_prc; /* nbr_var_prc gets incremented */
int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */
int nco_op_typ=nco_op_avg; /* [enm] Default operation is averaging */
int nco_pck_plc=nco_pck_plc_nil; /* [enm] Default packing is none */
int opt;
int out_id;
int rcd=NC_NOERR; /* [rcd] Return code */
int rec_dmn_id=NCO_REC_DMN_UNDEFINED;
int thr_idx; /* [idx] Index of current thread */
int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */
int var_lst_in_nbr=0;
lmt_sct **lmt=NULL_CEWI;
lmt_sct *lmt_rec=NULL_CEWI;
lmt_all_sct **lmt_all_lst; /* List of *lmt_all structures */
lmt_all_sct *lmt_all_rec=NULL_CEWI; /* Pointer to record limit structure in above list */
long idx_rec; /* [idx] Index of current record in current input file */
long rec_usd_cml=0L; /* [idx] Index of current record in output file (0 is first, ...) */
nco_bool CNV_ARM;
cnv_sct *cnv; /* [sct] Convention structure */
nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */
nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */
nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */
nco_bool FL_RTR_RMT_LCN;
nco_bool FL_LST_IN_APPEND=True; /* Option H */
nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */
nco_bool FORCE_APPEND=False; /* Option A */
nco_bool FORCE_OVERWRITE=False; /* Option O */
nco_bool FORTRAN_IDX_CNV=False; /* Option F */
nco_bool HISTORY_APPEND=True; /* Option h */
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool LAST_RECORD=False;
nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */
nco_bool flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
nco_int base_time_srt=nco_int_CEWI;
nco_int base_time_crr=nco_int_CEWI;
nm_id_sct *dmn_lst;
nm_id_sct *xtr_lst=NULL; /* xtr_lst may be alloc()'d from NULL with -c option */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */
size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */
size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */
size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */
size_t hdr_pad=0UL; /* [B] Pad at end of header section */
var_sct **var;
var_sct **var_fix;
var_sct **var_fix_out;
var_sct **var_out=NULL_CEWI;
var_sct **var_prc;
var_sct **var_prc_out;
#ifdef ENABLE_MPI
/* Declare all MPI-specific variables here */
MPI_Status mpi_stt; /* [enm] Status check to decode msg_tag_typ */
nco_bool TKN_WRT_FREE=True; /* [flg] Write-access to output file is available */
int fl_nm_lng; /* [nbr] Output file name length */
int msg_bfr[msg_bfr_lng]; /* [bfr] Buffer containing var, idx, tkn_wrt_rsp */
int jdx=0; /* [idx] MPI index for local variables */
int lcl_idx_lst[60]; /* [arr] Array containing indices of variables processed at each Worker */
int lcl_nbr_var=0; /* [nbr] Count of variables processes at each Worker */
int msg_tag_typ; /* [enm] MPI message tag type */
int prc_rnk; /* [idx] Process rank */
int prc_nbr=0; /* [nbr] Number of MPI processes */
int tkn_wrt_rnk=0; /* [idx] Rank of process holding write token */
int tkn_wrt_rsp; /* [enm] Response to request for write token */
int var_wrt_nbr=0; /* [nbr] Variables written to output file until now */
int rnk_wrk; /* [idx] Worker rank */
int wrk_id_bfr[wrk_id_bfr_lng]; /* [bfr] Buffer for rnk_wrk */
#endif /* !ENABLE_MPI */
static struct option opt_lng[]={ /* Structure ordered by short option key if possible */
/* Long options with no argument, no short option counterpart */
{"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"ram_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */
{"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */
{"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */
{"diskless_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */
{"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */
{"version",no_argument,0,0},
{"vrs",no_argument,0,0},
/* Long options with argument, no short option counterpart */
{"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */
{"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */
{"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */
{"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */
{"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */
{"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */
{"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */
{"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */
{"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"fl_fmt",required_argument,0,0},
{"file_format",required_argument,0,0},
{"gaa",required_argument,0,0}, /* [sng] Global attribute add */
{"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */
{"hdr_pad",required_argument,0,0},
{"header_pad",required_argument,0,0},
{"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
/* Long options with short counterparts */
{"3",no_argument,0,'3'},
{"4",no_argument,0,'4'},
{"netcdf4",no_argument,0,'4'},
{"5",no_argument,0,'5'},
{"64bit_data",no_argument,0,'5'},
{"cdf5",no_argument,0,'5'},
{"pnetcdf",no_argument,0,'5'},
{"64bit_offset",no_argument,0,'6'},
{"7",no_argument,0,'7'},
{"append",no_argument,0,'A'},
{"coords",no_argument,0,'c'},
{"crd",no_argument,0,'c'},
{"xtr_ass_var",no_argument,0,'c'},
{"xcl_ass_var",no_argument,0,'C'},
{"no_coords",no_argument,0,'C'},
{"no_crd",no_argument,0,'C'},
{"debug",required_argument,0,'D'},
{"nco_dbg_lvl",required_argument,0,'D'},
{"dimension",required_argument,0,'d'},
{"dmn",required_argument,0,'d'},
{"fortran",no_argument,0,'F'},
{"ftn",no_argument,0,'F'},
{"fl_lst_in",no_argument,0,'H'},
{"file_list",no_argument,0,'H'},
{"history",no_argument,0,'h'},
{"hst",no_argument,0,'h'},
{"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */
{"deflate",required_argument,0,'L'}, /* [enm] Deflate level */
{"local",required_argument,0,'l'},
{"lcl",required_argument,0,'l'},
{"nintap",required_argument,0,'n'},
{"overwrite",no_argument,0,'O'},
{"ovr",no_argument,0,'O'},
{"output",required_argument,0,'o'},
{"fl_out",required_argument,0,'o'},
{"path",required_argument,0,'p'},
{"pack",required_argument,0,'P'},
{"retain",no_argument,0,'R'},
{"rtn",no_argument,0,'R'},
{"revision",no_argument,0,'r'},
{"suspend", no_argument,0,'S'},
{"thr_nbr",required_argument,0,'t'},
{"threads",required_argument,0,'t'},
{"omp_num_threads",required_argument,0,'t'},
{"variable",required_argument,0,'v'},
{"exclude",no_argument,0,'x'},
{"xcl",no_argument,0,'x'},
{"pseudonym",required_argument,0,'Y'},
{"program",required_argument,0,'Y'},
{"prg_nm",required_argument,0,'Y'},
{"math",required_argument,0,'y'},
{"help",no_argument,0,'?'},
{"hlp",no_argument,0,'?'},
{0,0,0,0}
}; /* end opt_lng */
int opt_idx=0; /* Index of current long option into opt_lng array */
#ifdef _LIBINTL_H
setlocale(LC_ALL,""); /* LC_ALL sets all localization tokens to same value */
bindtextdomain("nco","/home/zender/share/locale"); /* ${LOCALEDIR} is e.g., /usr/share/locale */
/* MO files should be in ${LOCALEDIR}/es/LC_MESSAGES */
textdomain("nco"); /* PACKAGE is name of program */
#endif /* not _LIBINTL_H */
#ifdef ENABLE_MPI
/* MPI Initialization */
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&prc_nbr);
MPI_Comm_rank(MPI_COMM_WORLD,&prc_rnk);
#endif /* !ENABLE_MPI */
/* Start clock and save command line */
cmd_ln=nco_cmd_ln_sng(argc,argv);
/* Get program name and set program enum (e.g., nco_prg_id=ncra) */
nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id);
/* Parse command line arguments */
while(1){
/* getopt_long_only() allows one dash to prefix long options */
opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx);
/* NB: access to opt_crr is only valid when long_opt is detected */
if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */
opt_crr=(char *)strdup(opt_lng[opt_idx].name);
/* Process long options without short option counterparts */
if(opt == 0){
if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){
bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){
cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_byt */
if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){
cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_min */
if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){
/* Copy limit argument for later processing */
cnk_arg[cnk_nbr]=(char *)strdup(optarg);
cnk_nbr++;
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){
cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){
/* Chunking map */
cnk_map_sng=(char *)strdup(optarg);
cnk_map=nco_cnk_map_get(cnk_map_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){
/* Chunking policy */
cnk_plc_sng=(char *)strdup(optarg);
cnk_plc=nco_cnk_plc_get(cnk_plc_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt);
if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){
gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *));
gaa_arg[gaa_nbr++]=(char *)strdup(optarg);
} /* endif gaa */
if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){
hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif "hdr_pad" */
if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){
log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
nc_set_log_level(log_lvl);
} /* !log_lvl */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Open (netCDF3) file(s) in RAM */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Create file in RAM */
if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
nco_exit(EXIT_SUCCESS);
} /* endif "vrs" */
if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True;
if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False;
} /* opt != 0 */
/* Process short options */
switch(opt){
case 0: /* Long options have already been processed, return */
break;
case '3': /* Request netCDF3 output storage format */
fl_out_fmt=NC_FORMAT_CLASSIC;
break;
case '4': /* Request netCDF4 output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4;
break;
case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */
fl_out_fmt=NC_FORMAT_CDF5;
break;
case '6': /* Request netCDF3 64-bit offset output storage format */
fl_out_fmt=NC_FORMAT_64BIT_OFFSET;
break;
case '7': /* Request netCDF4-classic output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC;
break;
case 'A': /* Toggle FORCE_APPEND */
FORCE_APPEND=!FORCE_APPEND;
break;
case 'C': /* Extract all coordinates associated with extracted variables? */
EXTRACT_ASSOCIATED_COORDINATES=False;
break;
case 'c':
EXTRACT_ALL_COORDINATES=True;
break;
case 'D': /* Debugging level. Default is 0. */
nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
break;
case 'd': /* Copy limit argument for later processing */
lmt_arg[lmt_nbr]=(char *)strdup(optarg);
lmt_nbr++;
break;
case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */
FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV;
break;
case 'H': /* Toggle writing input file list attribute */
FL_LST_IN_APPEND=!FL_LST_IN_APPEND;
break;
case 'h': /* Toggle appending to history global attribute */
HISTORY_APPEND=!HISTORY_APPEND;
break;
case 'L': /* [enm] Deflate level. Default is 0. */
dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'l': /* Local path prefix for files retrieved from remote file system */
fl_pth_lcl=(char *)strdup(optarg);
break;
case 'n': /* NINTAP-style abbreviation of files to average */
fl_lst_abb=nco_lst_prs_2D(optarg,",",&abb_arg_nbr);
if(abb_arg_nbr < 1 || abb_arg_nbr > 6){
(void)fprintf(stdout,gettext("%s: ERROR Incorrect abbreviation for file list\n"),nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
} /* end if */
break;
case 'O': /* Toggle FORCE_OVERWRITE */
FORCE_OVERWRITE=!FORCE_OVERWRITE;
break;
case 'o': /* Name of output file */
fl_out=(char *)strdup(optarg);
break;
case 'p': /* Common file path */
fl_pth=(char *)strdup(optarg);
break;
case 'P': /* Packing policy */
nco_pck_plc_sng=(char *)strdup(optarg);
nco_pck_plc=nco_pck_plc_get(nco_pck_plc_sng);
break;
case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */
RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC;
break;
case 'r': /* Print CVS program information and copyright notice */
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
(void)nco_lbr_vrs_prn();
(void)nco_cpy_prn();
(void)nco_cnf_prn();
nco_exit(EXIT_SUCCESS);
break;
#ifdef ENABLE_MPI
case 'S': /* Suspend with signal handler to facilitate debugging */
if(signal(SIGUSR1,nco_cnt_run) == SIG_ERR) (void)fprintf(fp_stderr,"%s: ERROR Could not install suspend handler.\n",nco_prg_nm_get());
while(!nco_spn_lck_brk) usleep(nco_spn_lck_us); /* Spinlock. fxm: should probably insert a sched_yield */
break;
#endif /* !ENABLE_MPI */
case 't': /* Thread number */
thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'v': /* Variables to extract/exclude */
/* Replace commas with hashes when within braces (convert back later) */
optarg_lcl=(char *)strdup(optarg);
(void)nco_rx_comma2hash(optarg_lcl);
var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr);
optarg_lcl=(char *)nco_free(optarg_lcl);
xtr_nbr=var_lst_in_nbr;
break;
case 'x': /* Exclude rather than extract variables specified with -v */
EXCLUDE_INPUT_LIST=True;
break;
case 'Y': /* Pseudonym */
/* Call nco_prg_prs to reset pseudonym */
optarg_lcl=(char *)strdup(optarg);
if(nco_prg_nm) nco_prg_nm=(char *)nco_free(nco_prg_nm);
nco_prg_nm=nco_prg_prs(optarg_lcl,&nco_prg_id);
optarg_lcl=(char *)nco_free(optarg_lcl);
break;
case 'y': /* Operation type */
nco_op_typ_sng=(char *)strdup(optarg);
if(nco_prg_id == ncra || nco_prg_id == ncfe || nco_prg_id == ncge) nco_op_typ=nco_op_typ_get(nco_op_typ_sng);
break;
case '?': /* Print proper usage */
(void)nco_usg_prn();
nco_exit(EXIT_SUCCESS);
break;
case '-': /* Long options are not allowed */
(void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
break;
default: /* Print proper usage */
(void)fprintf(stdout,"%s ERROR in command-line syntax/options. Please reformulate command accordingly.\n",nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
break;
} /* end switch */
if(opt_crr) opt_crr=(char *)nco_free(opt_crr);
} /* end while loop */
/* Process positional arguments and fill-in filenames */
fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE);
/* Make uniform list of user-specified chunksizes */
if(cnk_nbr > 0) cnk_dmn=nco_cnk_prs(cnk_nbr,cnk_arg);
/* Make uniform list of user-specified dimension limits */
if(lmt_nbr > 0) lmt=nco_lmt_prs(lmt_nbr,lmt_arg);
/* Initialize thread information */
thr_nbr=nco_openmp_ini(thr_nbr);
in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int));
/* Parse filename */
fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
/* Make sure file is on local system and is readable or die trying */
fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id);
/* Get number of variables, dimensions, and record dimension ID of input file */
(void)nco_inq(in_id,&nbr_dmn_fl,&nbr_var_fl,(int *)NULL,&rec_dmn_id);
(void)nco_inq_format(in_id,&fl_in_fmt);
/* Form initial extraction list which may include extended regular expressions */
xtr_lst=nco_var_lst_mk(in_id,nbr_var_fl,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr);
/* Change included variables to excluded variables */
if(EXCLUDE_INPUT_LIST) xtr_lst=nco_var_lst_xcl(in_id,nbr_var_fl,xtr_lst,&xtr_nbr);
/* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */
cnv=nco_cnv_ini(in_id);
/* Add all coordinate variables to extraction list */
if(EXTRACT_ALL_COORDINATES) xtr_lst=nco_var_lst_crd_add(in_id,nbr_dmn_fl,nbr_var_fl,xtr_lst,&xtr_nbr,cnv);
/* Extract coordinates associated with extracted variables */
if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst=nco_var_lst_crd_ass_add(in_id,xtr_lst,&xtr_nbr,cnv);
/* Sort extraction list by variable ID for fastest I/O */
if(xtr_nbr > 1) xtr_lst=nco_lst_srt_nm_id(xtr_lst,xtr_nbr,False);
/* We now have final list of variables to extract. Phew. */
/* Find coordinate/dimension values associated with user-specified limits
NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
for(idx=0;idx<lmt_nbr;idx++) (void)nco_lmt_evl(in_id,lmt[idx],0L,FORTRAN_IDX_CNV);
/* Place all dimensions in lmt_all_lst */
lmt_all_lst=(lmt_all_sct **)nco_malloc(nbr_dmn_fl*sizeof(lmt_all_sct *));
/* Initialize lmt_all_sct's */
(void)nco_msa_lmt_all_ntl(in_id,MSA_USR_RDR,lmt_all_lst,nbr_dmn_fl,lmt,lmt_nbr);
/* Find dimensions associated with variables to be extracted */
dmn_lst=nco_dmn_lst_ass_var(in_id,xtr_lst,xtr_nbr,&nbr_dmn_xtr);
/* Fill-in dimension structure for all extracted dimensions */
dim=(dmn_sct **)nco_malloc(nbr_dmn_xtr*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_xtr;idx++) dim[idx]=nco_dmn_fll(in_id,dmn_lst[idx].id,dmn_lst[idx].nm);
/* Dimension list no longer needed */
dmn_lst=nco_nm_id_lst_free(dmn_lst,nbr_dmn_xtr);
/* Merge hyperslab limit information into dimension structures */
if(nbr_dmn_fl > 0) (void)nco_dmn_lmt_all_mrg(dmn_out,nbr_dmn_xtr,lmt_all_lst,nbr_dmn_fl);
/* Duplicate input dimension structures for output dimension structures */
dmn_out=(dmn_sct **)nco_malloc(nbr_dmn_xtr*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_xtr;idx++){
dmn_out[idx]=nco_dmn_dpl(dim[idx]);
(void)nco_dmn_xrf(dim[idx],dmn_out[idx]);
} /* end loop over idx */
/* Create stand-alone limit structure just for record dimension */
if(rec_dmn_id == NCO_REC_DMN_UNDEFINED){
if(nco_prg_id == ncra || nco_prg_id == ncrcat){
(void)fprintf(stdout,gettext("%s: ERROR input file %s lacks a record dimension\n"),nco_prg_nm_get(),fl_in);
if(fl_nbr == 1)(void)fprintf(stdout,gettext("%s: HINT Use ncks instead of %s\n"),nco_prg_nm_get(),nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* endif */
}else{ /* Record dimension exists */
lmt_rec=nco_lmt_sct_mk(in_id,rec_dmn_id,lmt,lmt_nbr,FORTRAN_IDX_CNV);
/* Initialize record coordinate re-basing */
if(nco_prg_id == ncra || nco_prg_id == ncrcat){
int var_id;
lmt_rec->lmt_cln=cln_nil;
lmt_rec->origin=0.0;
lmt_rec->rbs_sng=NULL;
/* Obtain metadata for record coordinate */
rcd=nco_inq_varid_flg(in_id,lmt_rec->nm,&var_id);
if(rcd == NC_NOERR){
char *cln_att_sng=NULL;
lmt_rec->rbs_sng=nco_lmt_get_udu_att(in_id,var_id,"units");
cln_att_sng=nco_lmt_get_udu_att(in_id,var_id,"calendar");
lmt_rec->lmt_cln=nco_cln_get_cln_typ(cln_att_sng);
if(cln_att_sng) cln_att_sng=(char*)nco_free(cln_att_sng);
}else{ /* endif record coordinate exists */
/* Record dimension, but not record coordinate, exists, which is fine. Reset return code. */
rcd=NC_NOERR;
} /* endif record coordinate exists */
} /* endif ncra, ncrcat */
} /* endif record dimension exists */
if(rec_dmn_id != NCO_REC_DMN_UNDEFINED){
for(idx=0;idx<nbr_dmn_fl;idx++){
if(!strcmp(lmt_rec->nm,lmt_all_lst[idx]->dmn_nm)){
lmt_all_rec=lmt_all_lst[idx];
/* Can only have one record limit */
if(lmt_all_rec->lmt_dmn_nbr > 1L){
(void)fprintf(stdout,"%s: Although this program allows multiple hyperslab limits for a single dimension, it allows only one unwrapped limit for the record dimension \"%s\". You have specified %i.\n",nco_prg_nm_get(),lmt_all_rec->dmn_nm,lmt_all_rec->lmt_dmn_nbr);
nco_exit(EXIT_FAILURE);
} /* end if */
if(nco_prg_id==ncra || nco_prg_id==ncrcat){
/* Change record dim in lmt_all_lst so that cnt=1 */
lmt_all_lst[idx]->dmn_cnt=1L;
lmt_all_lst[idx]->lmt_dmn[0]->srt=0L;
lmt_all_lst[idx]->lmt_dmn[0]->end=0L;
lmt_all_lst[idx]->lmt_dmn[0]->cnt=1L;
lmt_all_lst[idx]->lmt_dmn[0]->srd=1L;
} /* endif ncra || ncrcat */
break;
} /* endif current limit applies to record dimension */
} /* end loop over all dimensions */
} /* end if file has record dimension */
/* Is this an ARM-format data file? */
CNV_ARM=nco_cnv_arm_inq(in_id);
/* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */
if(CNV_ARM) base_time_srt=nco_cnv_arm_base_time_get(in_id);
/* Fill-in variable structure list for all extracted variables */
var=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
for(idx=0;idx<xtr_nbr;idx++){
var[idx]=nco_var_fll(in_id,xtr_lst[idx].id,xtr_lst[idx].nm,dim,nbr_dmn_xtr);
var_out[idx]=nco_var_dpl(var[idx]);
(void)nco_xrf_var(var[idx],var_out[idx]);
(void)nco_xrf_dmn(var_out[idx]);
} /* end loop over idx */
/* Extraction list no longer needed */
xtr_lst=nco_nm_id_lst_free(xtr_lst,xtr_nbr);
/* Divide variable lists into lists of fixed variables and variables to be processed */
(void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,NULL,0,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc);
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
#endif /* !ENABLE_MPI */
/* Make output and input files consanguineous */
if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt;
/* Verify output file format supports requested actions */
(void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl);
/* Open output file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,WRT_TMP_FL,&out_id);
/* Copy global attributes */
(void)nco_att_cpy(in_id,out_id,NC_GLOBAL,NC_GLOBAL,(nco_bool)True);
/* Catenate time-stamped command line to "history" global attribute */
if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln);
if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id);
if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr);
if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id);
/* Add input file list global attribute */
if(FL_LST_IN_APPEND && HISTORY_APPEND && FL_LST_IN_FROM_STDIN) (void)nco_fl_lst_att_cat(out_id,fl_lst_in,fl_nbr);
#ifdef ENABLE_MPI
/* Initialize MPI task information */
if(prc_nbr > 0 && HISTORY_APPEND) (void)nco_mpi_att_cat(out_id,prc_nbr);
#endif /* !ENABLE_MPI */
if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr);
/* Define dimensions in output file */
(void)nco_dmn_dfn(fl_out,out_id,dmn_out,nbr_dmn_xtr);
/* Define variables in output file, copy their attributes */
(void)nco_var_dfn(in_id,fl_out,out_id,var_out,xtr_nbr,(dmn_sct **)NULL,(int)0,nco_pck_plc_nil,nco_pck_map_nil,dfl_lvl);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
(void)nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Take output file out of define mode */
if(hdr_pad == 0UL){
(void)nco_enddef(out_id);
}else{
(void)nco__enddef(out_id,hdr_pad);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad);
} /* hdr_pad */
#ifdef ENABLE_MPI
} /* prc_rnk != rnk_mgr */
/* Manager obtains output filename and broadcasts to workers */
if(prc_rnk == rnk_mgr) fl_nm_lng=(int)strlen(fl_out_tmp);
MPI_Bcast(&fl_nm_lng,1,MPI_INT,0,MPI_COMM_WORLD);
if(prc_rnk != rnk_mgr) fl_out_tmp=(char *)nco_malloc((fl_nm_lng+1)*sizeof(char));
MPI_Bcast(fl_out_tmp,fl_nm_lng+1,MPI_CHAR,0,MPI_COMM_WORLD);
#endif /* !ENABLE_MPI */
/* Pre-processor token spaghetti here is necessary so that
1. UP/SMP/MPI codes all zero srt vectors before calling nco_var_val_cpy()
2. No codes zero srt vectors more than once */
/* Assign zero to start and unity to stride vectors in output variables */
(void)nco_var_srd_srt_set(var_out,xtr_nbr);
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
TKN_WRT_FREE=False;
#endif /* !ENABLE_MPI */
/* Copy variable data for non-processed variables */
/* (void)nco_var_val_cpy(in_id,out_id,var_fix,nbr_var_fix); */
(void)nco_msa_var_val_cpy(in_id,out_id,var_fix,nbr_var_fix,lmt_all_lst,nbr_dmn_fl);
#ifdef ENABLE_MPI
/* Close output file so workers can open it */
nco_close(out_id);
TKN_WRT_FREE=True;
} /* prc_rnk != rnk_mgr */
#else /* !ENABLE_MPI */
/* Close first input netCDF file (SMP only since MPI code immediate re-opens) */
(void)nco_close(in_id);
#endif /* !ENABLE_MPI */
/* Allocate and, if necessary, initialize accumulation space for processed variables */
for(idx=0;idx<nbr_var_prc;idx++){
if(nco_prg_id == ncra || nco_prg_id == ncrcat){
/* Allocate space for only one record */
var_prc_out[idx]->sz=var_prc[idx]->sz=var_prc[idx]->sz_rec;
} /* endif */
if(nco_prg_id == ncra || nco_prg_id == ncfe){
var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_malloc(var_prc_out[idx]->sz*sizeof(long int));
(void)nco_zero_long(var_prc_out[idx]->sz,var_prc_out[idx]->tally);
var_prc_out[idx]->val.vp=(void *)nco_malloc(var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type));
(void)nco_var_zero(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->val);
} /* end if */
} /* end loop over idx */
#ifdef ENABLE_MPI
/* NB: Only manager code manipulates value of TKN_WRT_FREE
Pass 1: Workers construct local persistent variable lists
Open first file
mpncra and mpncrcat process first record only
mpnces ingests complete file
Workers create local list of their variables
Pass 2: Complete record/file loops with local variable lists
Workers skip first timestep (mpncra/mpncrcat)
Workers process only variables in their local list from Pass 1
This variable persistence is necessary for mpncra and mpnces
since their workers must maintain running tallies for each variable.
Variable persistence is not necessary for mpncrcat
However, we do it anyway to keep mpncrcat and mpncra similar
mpncrcat writes records as it reads them and finishes after pass 2
Pass 3:
mpnces and mpncra require a final loop to normalize and write
Write-token for this loop is passed sequentially through the ranks */
/* Begin Pass 1: Workers construct local persistent variable lists */
fl_idx=0;
/* Variables may have different ID, missing_value, type, in each file */
for(idx=0;idx<nbr_var_prc;idx++) (void)nco_var_mtd_refresh(in_id,var_prc[idx]);
/* Each file can have a different number of records to process
NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
if(nco_prg_id == ncra || nco_prg_id == ncrcat) (void)nco_lmt_evl(in_id,lmt_rec,rec_usd_cml,FORTRAN_IDX_CNV);
/* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */
if(CNV_ARM) base_time_crr=nco_cnv_arm_base_time_get(in_id);
/* Perform various error-checks on input file */
if(False) (void)nco_fl_cmp_err_chk();
if(nco_prg_id == ncra || nco_prg_id == ncrcat){ /* ncfe jumps to else branch */
/* Loop over each record in current file */
if(nco_dbg_lvl >= nco_dbg_std && lmt_rec->srt > lmt_rec->end) (void)fprintf(stdout,gettext("%s: WARNING %s (input file %d) is superfluous\n"),nco_prg_nm_get(),fl_in,fl_idx);
idx_rec=lmt_rec->srt;
if(fl_idx == fl_nbr-1 && idx_rec >= 1L+lmt_rec->end-lmt_rec->srd) LAST_RECORD=True;
/* Process all variables in first record */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,gettext("Record %ld of %s is output record %ld\n"),idx_rec,fl_in,rec_usd_cml);
if(prc_rnk == rnk_mgr){ /* MPI manager code */
/* Compensate for incrementing on each worker's first message */
var_wrt_nbr=-prc_nbr+1;
idx=0;
/* While variables remain to be processed or written... */
while(var_wrt_nbr < nbr_var_prc){
/* Receive any message from any worker */
MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt);
/* Obtain MPI message tag type */
msg_tag_typ=mpi_stt.MPI_TAG;
/* Get sender's prc_rnk */
rnk_wrk=wrk_id_bfr[0];
/* Allocate next variable, if any, to worker */
if(msg_tag_typ == msg_tag_wrk_rqs){
var_wrt_nbr++; /* [nbr] Number of variables written */
/* Worker closed output file before sending msg_tag_wrk_rqs */
if(nco_prg_id == ncrcat) TKN_WRT_FREE=True; /* File written to at this point only for ncrcat */
if(idx > nbr_var_prc-1){
msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */
msg_bfr[1]=out_id; /* Output file ID */
}else{
/* Tell requesting worker to allocate space for next variable */
msg_bfr[0]=idx; /* [idx] Variable to be processed */
/* csz: fxm Workers do not need to know Master's out_id */
msg_bfr[1]=out_id; /* Output file ID */
msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */
/* Point to next variable on list */
idx++;
} /* endif idx */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,MPI_COMM_WORLD);
}else if(msg_tag_typ == msg_tag_tkn_wrt_rqs && nco_prg_id == ncrcat){ /* msg_tag_typ != msg_tag_wrk_rqs */
/* Allocate token if free, else ask worker to try later */
if(TKN_WRT_FREE){
TKN_WRT_FREE=False;
msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */
}else{
msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */
} /* !TKN_WRT_FREE */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
} /* msg_tag_typ != msg_tag_tkn_wrt_rqs */
} /* end while var_wrt_nbr < nbr_var_prc */
}else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */
/* csz: fxm delete redundant statement with two lines further down */
wrk_id_bfr[0]=prc_rnk;
var_wrt_nbr=0;
while(1){ /* While work remains... */
/* Send msg_tag_wrk_rqs */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,MPI_COMM_WORLD);
/* Receive msg_tag_wrk_rsp */
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rsp,MPI_COMM_WORLD,&mpi_stt);
idx=msg_bfr[0];
/* csz: fxm dangerous---workers must get and use their own out_id's, not master's out_id */
out_id=msg_bfr[1];
if(idx == idx_all_wrk_ass){
break;
}else{ /* idx != idx_all_wrk_ass */
/* Assign this variable to this worker for rest of program */
lcl_idx_lst[lcl_nbr_var]=idx;
/* csz: got to here reading logic */
lcl_nbr_var++;
var_prc_out[idx]->id=msg_bfr[2];
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Update hyperslab start indices to current record for each variable */
var_prc[idx]->srt[0]=idx_rec;
var_prc[idx]->end[0]=idx_rec;
var_prc[idx]->cnt[0]=1L;
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
if(nco_prg_id == ncra){
/* Convert char, short, long, int, and float types to doubles before arithmetic */
/* Output variable type is "sticky" so only convert on first record */
if(rec_usd_cml == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ);
/* Convert var_prc to type of var_prc_out in case type of variable on disk has changed */
var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]);
/* Perform arithmetic operations: avg, min, max, ttl, ... */
nco_opr_drv(rec_usd_cml,nco_op_typ,var_prc[idx],var_prc_out[idx]);
} /* !ncra */
/* Append current record to output file */
if(nco_prg_id == ncrcat){
var_prc_out[idx]->srt[0]=var_prc_out[idx]->end[0]=rec_usd_cml;
var_prc_out[idx]->cnt[0]=1L;
/* Replace this time_offset value with time_offset from initial file base_time */
if(CNV_ARM && !strcmp(var_prc[idx]->nm,"time_offset")) var_prc[idx]->val.dp[0]+=(base_time_crr-base_time_srt);
/* Obtain token and prepare to write */
while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,MPI_COMM_WORLD);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
tkn_wrt_rsp=msg_bfr[0];
/* Wait then re-send request */
if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break;
} /* end while loop waiting for write token */
/* Worker has token---prepare to write */
if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){
if(RAM_OPEN) md_open=NC_WRITE|NC_SHARE|NC_DISKLESS; else md_open=NC_WRITE|NC_SHARE;
rcd=nco_fl_open(fl_out_tmp,md_open,&bfr_sz_hnt,&out_id);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
if(var_prc_out[idx]->sz_rec > 1L) (void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc[idx]->val.vp,var_prc_out[idx]->type);
else (void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc[idx]->val.vp,var_prc_out[idx]->type);
/* Close output file and increment written counter */
nco_close(out_id);
var_wrt_nbr++;
} /* endif tkn_wrt_rqs_xcp */
} /* end if ncrcat */
/* Make sure record coordinate, if any, is monotonic */
if(nco_prg_id == ncrcat && var_prc[idx]->is_crd_var) (void)rec_crd_chk(var_prc[idx],fl_in,fl_out,idx_rec,rec_usd_cml);
/* Convert missing_value, if any, back to unpacked type */
if(var_prc[idx]->has_mss_val && var_prc[idx]->type != var_prc[idx]->typ_upk && !LAST_RECORD)
var_prc[idx]=nco_cnv_mss_val_typ(var_prc[idx],var_prc[idx]->typ_upk);
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(stderr,"\n");
} /* !idx_all_wrk_ass */
} /* while(1) loop requesting work/token in Worker */
rec_usd_cml++; /* [idx] Index of current record in output file (0 is first, ...) */
} /* endif Worker */
printf("DEBUG: End of first pass of ncra/ncrcat at node %d\n",prc_rnk);
/* End of ncra, ncrcat section */
}else{ /* ncfe */
if(prc_rnk == rnk_mgr){ /* MPI manager code */
/* Compensate for incrementing on each worker's first message */
var_wrt_nbr=-prc_nbr+1;
idx=0;
/* While variables remain to be processed or written... */
while(var_wrt_nbr < nbr_var_prc){
/* Receive message from any worker */
MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt);
/* Obtain MPI message tag type */
msg_tag_typ=mpi_stt.MPI_TAG;
/* Get sender's prc_rnk */
rnk_wrk=wrk_id_bfr[0];
/* Allocate next variable, if any, to worker */
if(msg_tag_typ == msg_tag_wrk_rqs){
var_wrt_nbr++; /* [nbr] Number of variables written */
/* Worker closed output file before sending msg_tag_wrk_rqs */
/* TKN_WRT_FREE=True; ncfe does not do file write here */
if(idx > nbr_var_prc-1){
msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */
msg_bfr[1]=out_id; /* Output file ID */
}else{
/* Tell requesting worker to allocate space for next variable */
msg_bfr[0]=idx; /* [idx] Variable to be processed */
msg_bfr[1]=out_id; /* Output file ID */
msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */
/* Point to next variable on list */
idx++;
} /* endif idx */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,MPI_COMM_WORLD);
} /* msg_tag_typ != msg_tag_wrk_rqs */
} /* end while var_wrt_nbr < nbr_var_prc */
}else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */
while(1){ /* While work remains... */
/* Send msg_tag_wrk_rqs */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,MPI_COMM_WORLD);
/* Receive msg_tag_wrk_rsp */
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rsp,MPI_COMM_WORLD,&mpi_stt);
idx=msg_bfr[0];
out_id=msg_bfr[1];
if(idx == idx_all_wrk_ass) break;
else{
lcl_idx_lst[lcl_nbr_var]=idx; /* storing the indices for subsequent processing by the worker */
lcl_nbr_var++;
var_prc_out[idx]->id=msg_bfr[2];
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
/* Convert char, short, long, int, and float types to doubles before arithmetic */
/* var_prc[idx]=nco_typ_cnv_rth(var_prc[idx],nco_op_typ); */
/* Output variable type is "sticky" so only convert on first record */
if(fl_idx == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ);
/* Convert var_prc to type of var_prc_out in case type of variable on disk has changed */
var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]);
/* Perform arithmetic operations: avg, min, max, ttl, ... */ /* Note: fl_idx not rec_usd_cml! */
nco_opr_drv(fl_idx,nco_op_typ,var_prc[idx],var_prc_out[idx]);
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* !idx_all_wrk_ass */
} /* while(1) loop requesting work/token in Worker */
} /* endif Worker */
} /* end else ncfe */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,"\n");
/* Close input netCDF file */
nco_close(in_id);
#ifdef ENABLE_MPI
/* This barrier ensures that all nodes have reached this point together.
Otherwise, the manager code should be altered so it can deal with
nodes in different stages of execution at any time.
Daniel: I think we should be convinced of this parallelization
structure before bothering with implementing the code restructuring in
the manager that would let us remove the barrier. The barrier
should only negligibly impact performance. */
checkpointMpi(prc_rnk, 1);
#endif /* ENABLE_MPI */
/* End Pass 1: Workers construct local persistent variable lists */
printf("DEBUG: prc_rnk %d is done with 1st pass\n",prc_rnk);
/* Begin Pass 2: Complete record/file loops with local variable lists */
#endif /* !ENABLE_MPI */
/* Loop over input files */
for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){
/* Parse filename */
if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,(int *)NULL,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,gettext("\nInput file %d is %s; "),fl_idx,fl_in);
/* Make sure file is on local system and is readable or die trying */
if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,gettext("local file %s:\n"),fl_in);
/* Open file once per thread to improve caching */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx);
in_id=in_id_arr[0];
#ifdef ENABLE_MPI
printf("DEBUG: input file opened in prc_rnk %d inside the loop\n",prc_rnk);
#endif /* !ENABLE_MPI */
/* Variables may have different IDs and missing_values in each file */
for(idx=0;idx<nbr_var_prc;idx++) (void)nco_var_mtd_refresh(in_id,var_prc[idx]);
/* Each file can have a different number of records to process
NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
if(nco_prg_id == ncra || nco_prg_id == ncrcat) (void)nco_lmt_evl(in_id,lmt_rec,rec_usd_cml,FORTRAN_IDX_CNV);
/* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */
if(CNV_ARM) base_time_crr=nco_cnv_arm_base_time_get(in_id);
/* Perform various error-checks on input file */
if(False) (void)nco_fl_cmp_err_chk();
if(nco_prg_id == ncra || nco_prg_id == ncrcat){ /* ncfe jumps to else branch */
/* Loop over each record in current file */
if(nco_dbg_lvl >= nco_dbg_std && lmt_rec->srt > lmt_rec->end) (void)fprintf(stdout,gettext("%s: WARNING %s (input file %d) is superfluous\n"),nco_prg_nm_get(),fl_in,fl_idx);
for(idx_rec=lmt_rec->srt;idx_rec<=lmt_rec->end;idx_rec+=lmt_rec->srd){
if(fl_idx == fl_nbr-1 && idx_rec >= 1L+lmt_rec->end-lmt_rec->srd) LAST_RECORD=True;
#ifdef ENABLE_MPI
if(fl_idx == 0 && idx_rec == lmt_rec->srt){
/* MPI operators processed first record in first-stage loop */
continue;
}else{ /* a loop of idx = stored indices */
if(prc_rnk == rnk_mgr){ /* For ncrcat, Manager gives write access for each record in each file */
if(nco_prg_id == ncrcat){ /* Give Write access to write current record */
/* var_wrt_nbr=-prc_nbr+1; */
var_wrt_nbr=0;
while(var_wrt_nbr < nbr_var_prc){ /* Give write access to Workers who have some variables; wrong condn? */
/* Receive message from any worker */
MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt);
/* Obtain MPI message tag type */
msg_tag_typ=mpi_stt.MPI_TAG;
/* Get sender's prc_rnk */
rnk_wrk=wrk_id_bfr[0];
if(msg_tag_typ == msg_tag_wrk_done) TKN_WRT_FREE=True;
if(msg_tag_typ == msg_tag_tkn_wrt_rqs){
if(rnk_wrk == tkn_wrt_rnk){ /* Prev write completed */
TKN_WRT_FREE=True;
} /* rnk_wrk != tkn_wrt_rnk */
/* Allocate token if free, else ask worker to try later */
if(TKN_WRT_FREE){
TKN_WRT_FREE=False;
msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */
tkn_wrt_rnk=rnk_wrk; /* To track who has the token */
var_wrt_nbr++;
}else{
msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */
} /* !TKN_WRT_FREE */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
} /* msg_tag_typ != msg_tag_tkn_wrt_rqs */
} /* End-while token request loop */
} /* !ncrcat */
}else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */
wrk_id_bfr[0]=prc_rnk;
var_wrt_nbr=0;
/* if(fl_idx == 0 && idx_rec == lmt_rec->srt) continue;
else a loop of idx = stored indices */
for(jdx=0;jdx<lcl_nbr_var;jdx++){
idx=lcl_idx_lst[jdx];
#endif /* !ENABLE_MPI */
/* Process all variables in current record */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,gettext("Record %ld of %s is output record %ld\n"),idx_rec,fl_in,rec_usd_cml);
#if 0
/* NB: Immediately preceding MPI for scope confounds Emacs indentation
Fake end scope restores correct indentation, simplifies code-checking */
} /* fake end for */
#endif /* !0 */
#ifndef ENABLE_MPI
#ifdef _OPENMP
#pragma omp parallel for default(none) private(idx,in_id) shared(CNV_ARM,base_time_crr,base_time_srt,nco_dbg_lvl,fl_in,fl_out,idx_rec,rec_usd_cml,in_id_arr,LAST_RECORD,nbr_var_prc,nco_op_typ,out_id,prg,rcd,var_prc,var_prc_out)
#endif /* !_OPENMP */
/* UP and SMP codes main loop over variables */
for(idx=0;idx<nbr_var_prc;idx++){
#endif /* ENABLE_MPI */
in_id=in_id_arr[omp_get_thread_num()];
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Update hyperslab start indices to current record for each variable */
var_prc[idx]->srt[0]=idx_rec;
var_prc[idx]->end[0]=idx_rec;
var_prc[idx]->cnt[0]=1L;
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
if(nco_prg_id == ncra){
/* Convert char, short, long, int, and float types to doubles before arithmetic */
var_prc[idx]=nco_typ_cnv_rth(var_prc[idx],nco_op_typ);
/* Output variable type is "sticky" so only convert on first record */
if(rec_usd_cml == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ);
/* Convert var_prc to type of var_prc_out in case type of variable on disk has changed */
var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]);
/* Perform arithmetic operations: avg, min, max, ttl, ... */
nco_opr_drv(rec_usd_cml,nco_op_typ,var_prc[idx],var_prc_out[idx]);
} /* end if ncra */
/* Append current record to output file */
if(nco_prg_id == ncrcat){
var_prc_out[idx]->srt[0]=var_prc_out[idx]->end[0]=rec_usd_cml;
var_prc_out[idx]->cnt[0]=1L;
/* Replace this time_offset value with time_offset from initial file base_time */
if(CNV_ARM && !strcmp(var_prc[idx]->nm,"time_offset")) var_prc[idx]->val.dp[0]+=(base_time_crr-base_time_srt);
#ifdef ENABLE_MPI
/* Obtain token and prepare to write */
while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,MPI_COMM_WORLD);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
tkn_wrt_rsp=msg_bfr[0];
/* Wait then re-send request */
if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break;
} /* end while loop waiting for write token */
/* Worker has token---prepare to write */
if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){
rcd=nco_fl_open(fl_out_tmp,NC_WRITE|NC_SHARE,&bfr_sz_hnt,&out_id);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
#endif /* !ENABLE_MPI */
if(var_prc_out[idx]->sz_rec > 1) (void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc[idx]->val.vp,var_prc_out[idx]->type);
else (void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc[idx]->val.vp,var_prc_out[idx]->type);
#ifdef ENABLE_MPI
/* Close output file and increment written counter */
nco_close(out_id);
var_wrt_nbr++;
} /* endif tkn_wrt_rqs_xcp */
#endif /* !ENABLE_MPI */
} /* end if ncrcat */
/* Make sure record coordinate, if any, is monotonic */
if(nco_prg_id == ncrcat && var_prc[idx]->is_crd_var) (void)rec_crd_chk(var_prc[idx],fl_in,fl_out,idx_rec,rec_usd_cml);
/* Convert missing_value, if any, back to disk type */
if(var_prc[idx]->has_mss_val && var_prc[idx]->type != var_prc[idx]->typ_upk && !LAST_RECORD)
var_prc[idx]=nco_cnv_mss_val_typ(var_prc[idx],var_prc[idx]->typ_upk);
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* end (OpenMP Parallel for) loop over variables */
#ifdef ENABLE_MPI
if(nco_prg_id == ncrcat){
/* Return token after writing record's last variable */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_done,MPI_COMM_WORLD);
} /* !ncrcat */
#endif /* !ENABLE_MPI */
rec_usd_cml++; /* [idx] Index of current record in output file (0 is first, ...) */
if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(stderr,"\n");
#ifdef ENABLE_MPI
} /* !Worker */
} /* end else ! fl_idx=0,idx_rec=srt */
#endif /* !ENABLE_MPI */
} /* end loop over idx_rec */
#ifdef ENABLE_MPI
if(prc_rnk != rnk_mgr){ /* Only Worker */
#endif /* !ENABLE_MPI */
/* Warn if fewer than number of requested records were read and final file has been processed */
if(lmt_rec->lmt_typ == lmt_dmn_idx && lmt_rec->is_usr_spc_min && lmt_rec->is_usr_spc_max){
long rec_nbr_rqs; /* Number of records user requested */
rec_nbr_rqs=1L+(lmt_rec->max_idx-lmt_rec->min_idx)/lmt_rec->srd;
if(nco_dbg_lvl >= nco_dbg_std && fl_idx == fl_nbr-1 && rec_nbr_rqs != rec_usd_cml) (void)fprintf(stdout,gettext("%s: WARNING User requested %li records but only %li were found\n"),nco_prg_nm_get(),rec_nbr_rqs,rec_usd_cml);
} /* end if */
/* Error if no records were read and final file has been processed */
if(rec_usd_cml <= 0 && fl_idx == fl_nbr-1){
(void)fprintf(stdout,gettext("%s: ERROR No records lay within specified hyperslab\n"),nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* end if */
#ifdef ENABLE_MPI
} /* !Worker */
printf("DEBUG: prc_rnk %d at the end of ncra/rcat\n",prc_rnk);
#endif /* !ENABLE_MPI */
/* End of ncra, ncrcat section */
}else{ /* ncfe */
#ifdef ENABLE_MPI
if(prc_rnk != rnk_mgr){ /* Only Worker does the ncfe processing */
if(fl_idx == 0){
continue;
}else{ /* a loop of idx = stored indices */
for(jdx=0;jdx<lcl_nbr_var;jdx++){
idx=lcl_idx_lst[jdx];
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(idx,in_id) shared(nco_dbg_lvl,fl_idx,in_id_arr,nbr_var_prc,nco_op_typ,rcd,var_prc,var_prc_out)
#endif /* !_OPENMP */
for(idx=0;idx<nbr_var_prc;idx++){ /* Process all variables in current file */
#endif /* !ENABLE_MPI */
in_id=in_id_arr[omp_get_thread_num()];
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
/* Convert char, short, long, int, and float types to doubles before arithmetic */
/* var_prc[idx]=nco_typ_cnv_rth(var_prc[idx],nco_op_typ); */
/* Output variable type is "sticky" so only convert on first record */
if(fl_idx == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ);
/* Convert var_prc to type of var_prc_out in case type of variable on disk has changed */
var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]);
/* Perform arithmetic operations: avg, min, max, ttl, ... */ /* Note: fl_idx not rec_usd_cml! */
nco_opr_drv(fl_idx,nco_op_typ,var_prc[idx],var_prc_out[idx]);
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* end (OpenMP parallel for) loop over idx */
#ifdef ENABLE_MPI
} /* end else !fl_idx=0 */
} /* !Worker */
#endif /* !ENABLE_MPI */
} /* end else ncfe */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,"\n");
/* Close input netCDF file */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]);
/* Dispose local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in);
} /* end loop over fl_idx */
#ifdef ENABLE_MPI
printf("DEBUG: prc_rnk %d is out of file idx loop\n",prc_rnk);
#endif /* !ENABLE_MPI */
/* Normalize, multiply, etc where necessary */
if(nco_prg_id == ncra || nco_prg_id == ncfe){
#ifdef ENABLE_MPI
if(prc_rnk != rnk_mgr){ /* Only workers have indices of variables to process */
for(jdx=0;jdx<lcl_nbr_var;jdx++){
idx=lcl_idx_lst[jdx];
#if 0
/* NB: Immediately preceding MPI if/for scopes confound Emacs indentation
Fake end scopes restore correct indentation, simplify code-checking */
} /* fake end for */
} /* fake end if */
#endif /* !0 */
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(idx) shared(nbr_var_prc,nco_op_typ,var_prc,var_prc_out)
#endif /* !_OPENMP */
for(idx=0;idx<nbr_var_prc;idx++){
#endif /* !ENABLE_MPI */
if(var_prc[idx]->is_crd_var){
/* Return linear averages of coordinates unless computing extrema
Prevent coordinate variables from encountering nco_var_nrm_sdn() */
if((nco_op_typ != nco_op_min) && (nco_op_typ != nco_op_max)) (void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
}else{ /* !var_prc[idx]->is_crd_var */
switch(nco_op_typ){
case nco_op_avg: /* Normalize sum by tally to create mean */
case nco_op_sqrt: /* Normalize sum by tally to create mean */
case nco_op_sqravg: /* Normalize sum by tally to create mean */
case nco_op_rms: /* Normalize sum of squares by tally to create mean square */
case nco_op_avgsqr: /* Normalize sum of squares by tally to create mean square */
(void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
break;
case nco_op_rmssdn: /* Normalize sum of squares by tally-1 to create mean square for sdn */
(void)nco_var_nrm_sdn(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
break;
case nco_op_min: /* Minimum is already in buffer, do nothing */
case nco_op_max: /* Maximum is already in buffer, do nothing */
case nco_op_ttl: /* Total is already in buffer, stuff missing values into elements with zero tally */
(void)nco_var_tll_zro_mss_val(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
default:
break;
} /* end switch */
/* Some operations require additional processing */
switch(nco_op_typ){
case nco_op_rms: /* Take root of mean of sum of squares to create root mean square */
case nco_op_rmssdn: /* Take root of sdn mean of sum of squares to create root mean square for sdn */
case nco_op_sqrt: /* Take root of mean to create root mean */
(void)nco_var_sqrt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val,var_prc_out[idx]->val);
break;
case nco_op_sqravg: /* Square mean to create square of the mean (for sdn) */
(void)nco_var_mlt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val,var_prc_out[idx]->val);
break;
default:
break;
} /* end switch */
} /* !var_prc[idx]->is_crd_var */
var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_free(var_prc[idx]->tally);
} /* end (OpenMP parallel for) loop over variables */
#ifdef ENABLE_MPI
printf("DEBUG: End of Normzn at prc_rnk %d\n",prc_rnk);
} /* prc_rnk == rnk_mgr */
for(idx = 0; idx < nbr_var_prc; idx++) {
assert(var_prc_out[idx]->tally == var_prc[idx]->tally);
if (var_prc_out[idx]->tally == 0) continue;
printf("DEBUG: node %d reset idx %d tally for var_prc(out) (cleanup)\n",prc_rnk,idx);
var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_free(var_prc[idx]->tally);
}
printf("DEBUG: Mgr shud prnt this too, prc_rnk %d\n",prc_rnk);
#endif /* !ENABLE_MPI */
} /* !ncra/ncfe */
#ifdef ENABLE_MPI
printf("DEBUG: After all processing; Before barrier, prc_rnk %d\n",prc_rnk);
if(prc_rnk == rnk_mgr){ /* Only Manager */
rcd=nco_fl_open(fl_out_tmp,NC_WRITE|NC_SHARE,&bfr_sz_hnt,&out_id);
printf("DEBUG: prc_rnk %d opened out file\n",prc_rnk);
#endif /* !ENABLE_MPI */
/* Manually fix YYMMDD date which was mangled by averaging */
if(cnv->CCM_CCSM_CF && nco_prg_id == ncra) (void)nco_cnv_ccm_ccsm_cf_date(out_id,var_out,xtr_nbr);
/* End Pass 2: Complete record/file loops with local variable lists */
/* Begin Pass 3: */
/* End Pass 3: */
/* Add time variable to output file
NB: nco_cnv_arm_time_install() contains OpenMP critical region */
if(CNV_ARM && nco_prg_id == ncrcat) (void)nco_cnv_arm_time_install(out_id,base_time_srt,dfl_lvl);
#ifdef ENABLE_MPI
nco_close(out_id);
printf("DEBUG: Mgr prc_rnk %d closed out file %d after fixing date, time \n", prc_rnk, out_id);
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,prc_rnk+1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
printf("DEBUG: Mgr sent token to worker 1 for final write\n");
}else{ /* Workers */
printf("DEBUG: prc_rnk %d waiting for msg from Mgr for final write\n",prc_rnk);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,prc_rnk-1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
printf("DEBUG: prc_rnk %d got token for final write to %d\n",prc_rnk, out_id);
if(nco_prg_id == ncra || nco_prg_id == ncfe){
/* Copy averages to output file and free averaging buffers */
rcd=nco_fl_open(fl_out_tmp,NC_WRITE|NC_SHARE,&bfr_sz_hnt,&out_id);
printf("DEBUG: prc_rnk %d opened output file for final write\n",prc_rnk);
for(jdx=0;jdx<lcl_nbr_var;jdx++){
idx=lcl_idx_lst[jdx];
/* Revert any arithmetic promotion but leave unpacked (for now) */
/* printf("DEBUG: Before nco_var_cnf_typ prc_rnk %d var val %f\n",prc_rnk,var_prc_out[idx]->val.ip[0]); */
var_prc_out[idx]=nco_var_cnf_typ(var_prc_out[idx]->typ_upk,var_prc_out[idx]);
/* printf("DEBUG: After nco_var_cnf_typ prc_rnk %d var val %f\n",prc_rnk,var_prc_out[idx]->val.ip[0]); */
/* Packing/Unpacking */
if(nco_pck_plc == nco_pck_plc_all_new_att) var_prc_out[idx]=nco_put_var_pck(out_id,var_prc_out[idx],nco_pck_plc);
printf("DEBUG: prc_rnk %d to final write var %s with idx %d val %g\n",prc_rnk,var_prc_out[idx]->nm,idx,var_prc_out[idx]->val.fp[0]);
if(var_prc_out[idx]->nbr_dim == 0){
(void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
}else{ /* end if variable is scalar */
/* Size of record dimension is one in output file */
if(nco_prg_id == ncra) var_prc_out[idx]->cnt[0]=1L;
(void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
} /* end if variable is an array */
var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp);
} /* end loop over jdx */
/* Close output file */
nco_close(out_id);
printf("DEBUG: prc_rnk %d closed out file after writing\n",prc_rnk);
/* Send Token to Manager */
} /* end if */
if(prc_rnk == prc_nbr-1) MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD); else MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,prc_rnk+1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
} /* !Workers */
if(prc_rnk == rnk_mgr){ /* Only Manager */
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,prc_nbr-1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
(void)nco_fl_mv(fl_out_tmp,fl_out);
} /* !Manager */
MPI_Finalize();
#else /* !ENABLE_MPI */
/* Copy averages to output file and free averaging buffers */
if(nco_prg_id == ncra || nco_prg_id == ncfe){
for(idx=0;idx<nbr_var_prc;idx++){
/* Revert any arithmetic promotion but leave unpacked (for now) */
var_prc_out[idx]=nco_var_cnf_typ(var_prc_out[idx]->typ_upk,var_prc_out[idx]);
/* Packing/Unpacking */
if(nco_pck_plc == nco_pck_plc_all_new_att) var_prc_out[idx]=nco_put_var_pck(out_id,var_prc_out[idx],nco_pck_plc);
if(var_prc_out[idx]->nbr_dim == 0){
(void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
}else{ /* end if variable is scalar */
/* Size of record dimension is 1 in output file */
if(nco_prg_id == ncra) var_prc_out[idx]->cnt[0]=1L;
(void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
} /* end if variable is an array */
var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp);
} /* end loop over idx */
} /* end if */
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
#endif /* !ENABLE_MPI */
/* Clean memory unless dirty memory allowed */
if(flg_mmr_cln){
/* ncra-specific memory cleanup */
if(nco_prg_id == ncra || nco_prg_id == ncrcat) lmt_rec=nco_lmt_free(lmt_rec);
/* NCO-generic clean-up */
/* Free individual strings/arrays */
if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln);
if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng);
if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng);
if(fl_in) fl_in=(char *)nco_free(fl_in);
if(fl_out) fl_out=(char *)nco_free(fl_out);
if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp);
if(fl_pth) fl_pth=(char *)nco_free(fl_pth);
if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl);
if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr);
/* Free lists of strings */
if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr);
if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1);
if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr);
if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr);
if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr);
/* Free limits */
for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]);
if(lmt_nbr > 0) lmt=nco_lmt_lst_free(lmt,lmt_nbr);
/* Free chunking information */
for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]);
if(cnk_nbr > 0) cnk_dmn=nco_cnk_lst_free(cnk_dmn,cnk_nbr);
/* Free dimension lists */
if(nbr_dmn_xtr > 0) dim=nco_dmn_lst_free(dim,nbr_dmn_xtr);
if(nbr_dmn_xtr > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_xtr);
#if 1
/* Free variable lists */
if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr);
if(xtr_nbr > 0) var_out=nco_var_lst_free(var_out,xtr_nbr);
var_prc=(var_sct **)nco_free(var_prc);
var_prc_out=(var_sct **)nco_free(var_prc_out);
var_fix=(var_sct **)nco_free(var_fix);
var_fix_out=(var_sct **)nco_free(var_fix_out);
#endif /* !1 */
#if 0
/* 20051027: Try ncwa free()'ing technique to avoid freeing dangling pointers */
if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr);
/* ncwa uses nco_var_lst_free() on var_prc_out because var_out has dangling pointers */
if(nbr_var_fix > 0) var_fix_out=nco_var_lst_free(var_fix_out,nbr_var_fix);
if(nbr_var_prc > 0) var_prc_out=nco_var_lst_free(var_prc_out,nbr_var_prc);
var_prc=(var_sct **)nco_free(var_prc);
var_fix=(var_sct **)nco_free(var_fix);
var_out=(var_sct **)nco_free(var_out);
#endif /* !0 */
} /* !flg_mmr_cln */
nco_exit_gracefully();
return EXIT_SUCCESS;
} /* end main() */
|
affinity.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "resources.h"
#include "affinity.h"
#include "workload.h"
#include "omplib.h"
#include "mem.h"
void init_affinity(thread_str *t, local_str *local);
void end_affinity(thread_str *t, local_str *local);
void initialise_thread(local_str *local);
void initialise_global_queues(thread_str *t, local_str local);
bound_str get_bounds(bound_str space, int current_block, int total_blocks, int offset);
int get_stepsize(int low, int high, int total_blocks);
int get_most_loaded_thread(int *next_lo, bound_str *local, int nthreads);
bound_str get_work(thread_str *t, int thread_id, int nthreads);
/*
 * Run loop `loopid` over the iteration space [0, N) with an affinity-based
 * schedule: every thread first consumes chunks from its own queue, and once
 * that queue is empty it repeatedly steals from whichever thread currently
 * has the most unclaimed iterations, until no queue has work left.
 */
void runloop_affinity(int loopid)
{
thread_str t;
/* shared global iteration space [0, N) */
t.global.lo = 0;
t.global.hi = N;
#pragma omp parallel default(none) shared(loopid, t)
{
local_str local;
/* per-thread state plus shared queues/locks (allocation + barrier inside) */
init_affinity(&t, &local);
while(1)
{
/* local.affinity becomes TRUE once this thread's own queue is drained;
 * from then on, pick the most loaded victim each round */
if(local.affinity)
{
local.most_loaded = get_most_loaded_thread(t.next_lo, t.local, local.nthreads);
if(local.most_loaded == DONE)
break; /* every queue is empty: leave the work loop */
}
/* atomically claim the next chunk from the target queue */
local.current = get_work(&t, local.most_loaded, local.nthreads);
/* get_work does not clamp the chunk; an overshoot past the queue's hi
 * means the queue is now exhausted, so clamp and switch to stealing.
 * NOTE(review): the clamped chunk may be empty (lo >= hi) — presumably
 * execute_work tolerates empty ranges; confirm. */
if(local.current.hi>= t.local[local.most_loaded].hi)
{
local.current.hi = t.local[local.most_loaded].hi;
local.affinity = TRUE;
}
execute_work(loopid, local.current.lo, local.current.hi);
}
/* barriers + shared-memory teardown */
end_affinity(&t, &local);
} // end pragma
}
/*
 * Per-thread setup for the affinity schedule: fill in this thread's private
 * state, let exactly one thread allocate the shared arrays, then have every
 * thread initialise its own queue. Must be called by all threads of the
 * parallel region.
 */
void init_affinity(thread_str *t, local_str *local)
{
initialise_thread(local);
#pragma omp single
{
// allocate required memory for all threads
malloc_structure(t, *local);
}
/* the implicit barrier at the end of `omp single` guarantees the shared
 * arrays exist before any thread touches them below */
initialise_global_queues(t, *local);
/* make sure all the threads have initialised their shared data */
#pragma omp barrier
}
/*
 * Tear down the shared affinity-scheduling state. The leading barrier makes
 * sure no thread is still stealing work (i.e. reading another thread's queue
 * or lock) while teardown begins; locks are destroyed before the single
 * thread frees the shared arrays.
 */
void end_affinity(thread_str *t, local_str *local)
{
#pragma omp barrier
#ifdef LOCK
/* each thread destroys the lock guarding its own queue */
deallocate_lock(&t->lock[local->tid]);
#pragma omp barrier // ensure all locks are deallocated before freeing memory
#endif
#pragma omp single
{
// deallocates required memory for all threads
free_structure(t);
}
}
/*
 * Fill in the calling thread's private bookkeeping: its id, the team size,
 * the queue it will consume first (its own), and the stealing flag, which
 * starts off FALSE so the thread works through its own chunk first.
 */
void initialise_thread(local_str *local)
{
    int my_id = get_tid();

    local->tid         = my_id;
    local->nthreads    = get_total_threads();
    local->most_loaded = my_id;  /* begin with our own queue */
    local->affinity    = FALSE;  /* not stealing yet */
}
/*
 * Initialise this thread's slot in the shared queue arrays: its [lo, hi)
 * share of the global iteration space and the matching next-to-hand-out
 * pointer. Called right after initialise_thread(), so local.most_loaded
 * still equals local.tid here — each thread sets up only its own queue.
 */
void initialise_global_queues(thread_str *t, local_str local)
{
#ifdef LOCK
/* one lock per queue, created by the queue's owner */
allocate_lock(&t->lock[local.tid]);
#endif
t->local[local.most_loaded] = get_bounds(t->global, local.most_loaded, local.nthreads, 0);
t->next_lo[local.most_loaded] = t->local[local.most_loaded].lo;
}
/*
 * Split `space` into `total_blocks` near-equal chunks and return the bounds
 * of chunk `current_block`, shifted by `offset`. The final chunk is clamped
 * to space.hi, so it may be shorter than the others.
 */
bound_str get_bounds(bound_str space, int current_block, int total_blocks, int offset)
{
    const int chunk = get_stepsize(space.lo, space.hi, total_blocks);
    bound_str b;

    b.lo = current_block * chunk + offset;
    b.hi = b.lo + chunk;
    if (b.hi > space.hi)
        b.hi = space.hi;  /* last block absorbs the rounding */
    return b;
}
int get_stepsize(int low, int high, int total_blocks)
{
return (int) ceil((double)(high - low)/(double)total_blocks);
}
/*
 * Atomically claim the next chunk from thread `thread_id`'s queue. The
 * chunk size is 1/nthreads (rounded up) of whatever remains in that queue,
 * so chunks shrink geometrically as the queue drains. Mutual exclusion is
 * a per-queue lock when LOCK is defined, otherwise one global omp critical
 * section (the #ifdef splices the critical block's braces around the shared
 * statements — do not reorder). The returned c.hi is NOT clamped to the
 * queue's upper bound; the caller clamps it and uses the overshoot to
 * detect an empty queue.
 */
bound_str get_work(thread_str *t, int thread_id, int nthreads)
{
bound_str c;
int stepsize;
#ifdef LOCK
set_lock(&t->lock[thread_id]);
#else
#pragma omp critical
{
#endif
/* claim [next_lo, next_lo + stepsize) and advance the queue pointer */
c.lo = t->next_lo[thread_id];
stepsize = get_stepsize(c.lo, t->local[thread_id].hi, nthreads);
t->next_lo[thread_id] += stepsize;
#ifdef LOCK
unset_lock(&t->lock[thread_id]);
#else
} // end of critical
#endif
c.hi = c.lo + stepsize;
return c;
}
/*
 * Return the id of the thread with the most unclaimed iterations left in
 * its queue (queue i holds [next_lo[i], local[i].hi)). Returns DONE when
 * every queue is empty. Reads the shared counters without locking, so the
 * answer is only a heuristic snapshot — the caller re-checks via get_work.
 */
int get_most_loaded_thread(int *next_lo, bound_str *local, int nthreads)
{
    int best     = DONE;  /* DONE means nothing remains anywhere */
    int best_rem = 0;
    int i;

    for (i = 0; i < nthreads; i++)
    {
        int rem = local[i].hi - next_lo[i];

        if (rem > best_rem)
        {
            best_rem = rem;
            best     = i;
        }
    }
    return best;
}
flexProxDualDataKL.h | #ifndef flexProxDualKL_H
#define flexProxDualKL_H
#include "flexProx.h"
//! represents prox for a Kullback-Leibler divergence data term
/*!
\f$ \alpha(\cdot-f+f\log\frac{f}{\cdot} + \delta_{\{\bar{u} : \bar{u}> 0 \}}(\cdot)) \f$
*/
template<typename T>
class flexProxDualDataKL : public flexProx<T>
{
/* Container type: device_vector under nvcc, std::vector on the host-only build */
#ifdef __CUDACC__
typedef thrust::device_vector<T> Tdata;
#else
typedef std::vector<T> Tdata;
#endif
public:
// Registers this prox under the dualKLDataProx identifier with the base class.
flexProxDualDataKL() : flexProx<T>(dualKLDataProx)
{
}
~flexProxDualDataKL()
{
if (VERBOSE > 0) printf("Destructor prox\n!");
}
// Data-free overload of the prox interface: intentionally a no-op, since the
// KL data term always needs the measured data f (see the fList overload below).
void applyProx(T alpha, flexBoxData<T>* data, const std::vector<int> &dualNumbers, const std::vector<int> &primalNumbers)
{
}
#ifdef __CUDACC__
// Element-wise prox applied via thrust::for_each over a zip iterator.
// Tuple layout (fixed by applyProx below): <0>=y (output), <1>=yTilde,
// <2>=sigmaElt, <3>=f. Computes the closed-form update
//   y = 0.5 * (alpha + yTilde - sqrt((yTilde + alpha)^2 + 4*(alpha*sigma*f - alpha*yTilde)))
struct flexProxDualDataKLFunctor
{
__host__ __device__
flexProxDualDataKLFunctor(T alpha) : alpha(alpha){}
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<0>(t) = (T)0.5 * (this->alpha + thrust::get<1>(t) - std::sqrt(std::pow(thrust::get<1>(t) + this->alpha, (int)2) + (T)4 * (this->alpha * thrust::get<2>(t) * thrust::get<3>(t) - this->alpha * thrust::get<1>(t))));
}
T alpha;
};
#endif
// Apply the KL prox to every dual variable listed in dualNumbers.
// fList[k] holds the data term f for dual variable dualNumbers[k]; it is
// indexed by k, while data->y / yTilde / sigmaElt are indexed by
// dualNumbers[k]. The CUDA path runs the functor above; the CPU path is
// the identical formula in an OpenMP loop.
void applyProx(T alpha, flexBoxData<T>* data, const std::vector<int> &dualNumbers, const std::vector<int> &primalNumbers, std::vector<Tdata> &fList)
{
#ifdef __CUDACC__
for (int k = 0; k < dualNumbers.size(); k++)
{
auto startIterator = thrust::make_zip_iterator(thrust::make_tuple(data->y[dualNumbers[k]].begin(), data->yTilde[dualNumbers[k]].begin(), data->sigmaElt[dualNumbers[k]].begin(), fList[k].begin()));
auto endIterator = thrust::make_zip_iterator(thrust::make_tuple(data->y[dualNumbers[k]].end(), data->yTilde[dualNumbers[k]].end(), data->sigmaElt[dualNumbers[k]].end(), fList[k].end()));
thrust::for_each(startIterator, endIterator, flexProxDualDataKLFunctor(alpha));
}
#else
for (int i = 0; i < dualNumbers.size(); i++)
{
T* ptrY = data->y[dualNumbers[i]].data();
T* ptrYtilde = data->yTilde[dualNumbers[i]].data();
T* ptrSigma = data->sigmaElt[dualNumbers[i]].data();
T* ptrF = fList[i].data();
int numElements = (int)data->yTilde[dualNumbers[i]].size();
#pragma omp parallel for
for (int j = 0; j < numElements; j++)
{
/* same closed-form update as the CUDA functor */
ptrY[j] = (T)0.5 * (alpha + ptrYtilde[j] - std::sqrt(std::pow(ptrYtilde[j] + alpha, (int)2) + (T)4 * (alpha * ptrSigma[j] * ptrF[j] - alpha * ptrYtilde[j])));
}
}
#endif
}
};
#endif
|
conv_kernel_rv64.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: ddzhao@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include "conv_kernel_rv64.h"
// #include "wino_conv_kernel_arm.h" // FIXME: add wino support
// #include "wino_conv_kernel_1_arm.h" // FIXME: add wino support
#define PER_OUT_CHAN 16
void sgemm_4x16_rv64(float* biases, float* input, float* kernel, long kernel_size, float* output, long output_xy,
int activation, int layout);
void sgemm_4x4_rv64(float* biases, float* input, float* kernel, long kernel_size, float* output, long output_xy,
int activation, int layout);
void im2col_fp32_1x1(float* input, int input_xy, float* col, int col_cnt, int input_chan);
void im2col_fp32_3x3(float* input, int w, int h, int channel, float* cur_col, int stride);
/*
 * Interleave `kernel_chan` kernels of `kernel_size` floats each into the
 * layout the sgemm micro-kernels expect: first full groups of PER_OUT_CHAN
 * channels (out[j*PER_OUT_CHAN + k] = in[k][j]), then groups of 4, with the
 * final partial group zero-padded out to 4 channels.
 *
 * Fix vs. the previous version: the tail used to compute three channel
 * pointers unconditionally, which performed out-of-bounds pointer arithmetic
 * whenever kernel_chan was a multiple of 4 (UB even without a dereference),
 * and duplicated the padding logic across three branches. The tail now sets
 * up only the pointers it dereferences; the emitted bytes are identical.
 */
static void interleave_kernel(float* kernel, float* kernel_interleaved, int kernel_chan, int kernel_size)
{
    int i, j, k;
    float* cur_kernel[PER_OUT_CHAN];
    float* cur_kernel_interleaved = kernel_interleaved;
    int remain = kernel_chan & 0x3; /* 0..3 channels left after the 4-wide groups */

    /* interleave PER_OUT_CHAN kernels at a time */
    for (i = 0; i + PER_OUT_CHAN - 1 < kernel_chan; i += PER_OUT_CHAN)
    {
        for (k = 0; k < PER_OUT_CHAN; k++)
            cur_kernel[k] = kernel + kernel_size * (i + k);
        for (j = 0; j < kernel_size; j++)
        {
            for (k = 0; k < PER_OUT_CHAN; k++)
                *(cur_kernel_interleaved++) = cur_kernel[k][j];
        }
    }

    /* interleave the remaining full groups of 4 kernels */
    for (; i < (kernel_chan & -4); i += 4)
    {
        for (k = 0; k < 4; k++)
            cur_kernel[k] = kernel + kernel_size * (i + k);
        for (j = 0; j < kernel_size; j++)
        {
            for (k = 0; k < 4; k++)
                *(cur_kernel_interleaved++) = cur_kernel[k][j];
        }
    }

    /* last (kernel_chan % 4) kernels, zero-padded to a full group of 4 */
    if (remain)
    {
        for (k = 0; k < remain; k++)
            cur_kernel[k] = kernel + kernel_size * (i + k);
        for (j = 0; j < kernel_size; j++)
        {
            for (k = 0; k < remain; k++)
                *(cur_kernel_interleaved++) = cur_kernel[k][j];
            for (k = remain; k < 4; k++)
                *(cur_kernel_interleaved++) = 0.f; /* padding channel */
        }
    }
}
/* kernel interleave */
/*
 * Interleave the whole filter tensor, one convolution group at a time.
 * The destination stride is padded so each group's channel count is rounded
 * up to a multiple of 4, matching interleave_kernel's zero-padded layout.
 */
static void interleave(struct tensor* filter, struct conv_priv_info* priv_info, struct conv_param* param)
{
    const int group = param->group;
    const int ksize = filter->dims[1] * filter->dims[2] * filter->dims[3];
    const int out_chan = filter->dims[0] / group;
    const int out_chan_align4 = (out_chan + 3) / 4 * 4;
    const int src_stride = ksize * out_chan;        /* floats per group, raw filter */
    const int dst_stride = ksize * out_chan_align4; /* floats per group, interleaved */
    float* src = filter->data;
    float* dst = priv_info->interleave_buffer;

    for (int g = 0; g < group; g++)
        interleave_kernel(src + g * src_stride, dst + g * dst_stride, out_chan, ksize);
}
/*
 * im2col: expand the input feature map into a column buffer so that the
 * convolution becomes a GEMM. Columns are produced 4 output pixels at a
 * time (the sgemm kernels consume 4-wide tiles); the final partial tile,
 * when out_w*out_h is not a multiple of 4, is zero-padded.
 *
 * Three code paths:
 *   1) 1x1 stride-1 kernels — pure gather, delegated to im2col_fp32_1x1;
 *   2) 3x3 dilation-1 kernels with equal strides — fast path via
 *      im2col_fp32_3x3 when the 4 pixels sit on one row away from the
 *      borders, element-wise fallback otherwise;
 *   3) generic kernel/stride/dilation — element-wise with bounds checks
 *      (out-of-image taps contribute 0, implementing zero padding).
 */
static void im2col(float* input, float* col, int in_c, int in_w, int in_h, int k_w, int k_h, int s_w, int s_h, int d_w,
int d_h, int pad_w0, int pad_w1, int pad_h0, int pad_h1, int out_w, int out_h, int num_thread)
{
/* path 1: 1x1 s1 — each column is just a channel-strided copy */
if (k_w == 1 && k_h == 1 && s_w == 1 && s_h == 1)
{
int kernel_size = k_w * k_h * in_c;
int in_xy = in_w * in_h;
int out_xy = out_w * out_h;
int col_end3 = out_xy & 3; /* leftover pixels in the last 4-wide tile */
#pragma omp parallel for num_threads(num_thread)
for (int col_i = 0; col_i < out_xy - 3; col_i += 4)
{
float* cur_col = col + col_i * kernel_size;
float* cur_input = input + col_i;
im2col_fp32_1x1(cur_input, in_xy, cur_col, 4, in_c);
}
int col_i = out_xy & -4;
float* cur_col;
// final 4 input
if (col_end3)
{
cur_col = col + col_i * kernel_size;
for (int col_j = 0; col_j < kernel_size; col_j++)
{
for (int i = 0; i < 4; i++)
{
/* pad the tile out to 4 pixels with zeros */
if (i < col_end3)
*cur_col++ = *(input + col_j * in_xy + col_i + i);
else
*cur_col++ = 0;
}
}
}
}
/* path 2: 3x3 d1, square stride */
else if (d_w == 1 && d_h == 1 && k_w == 3 && k_h == 3 && s_w == s_h)
{
int kernel_size = k_w * k_h * in_c;
int in_xy = in_w * in_h;
int out_xy = out_w * out_h;
int col_end3 = out_xy & 3;
int is_pad0 = (pad_w0 == 0) && (pad_h0 == 0) && (pad_w1 == 0) && (pad_h1 == 0);
#pragma omp parallel for num_threads(num_thread)
for (int col_i = 0; col_i < (out_xy & -4); col_i += 4)
{
float* cur_col = col + col_i * kernel_size;
int imy0 = col_i / out_w;
int imy3 = (col_i + 3) / out_w;
int imx0 = col_i - imy0 * out_w;
int imx3 = (col_i + 3) - imy3 * out_w;
/* fast path: all 4 pixels on one output row and no padding taps needed */
if ((imy0 == imy3) && (is_pad0 || (imy0 != 0 && imx0 != 0 && imy0 != (out_h - 1) && imx3 != (out_w - 1))))
{
float* l0 = input + (imy0 * s_h - pad_h0) * in_w + (imx0 * s_w - pad_w0);
{
im2col_fp32_3x3(l0, in_w, in_h, in_c, cur_col, s_w); // add im2col 3x3
cur_col += 4 * kernel_size;
}
}
else
{
/* slow path: gather the 3x3 window per pixel, zeroing taps that
 * fall outside the image */
int cnt_y[4] = {imy0, (col_i + 1) / out_w, (col_i + 2) / out_w, imy3};
int cnt_x[4] = {imx0, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2, imx3};
int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0,
cnt_x[3] * s_w - pad_w0};
int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0,
cnt_y[3] * s_h - pad_h0};
for (int kch = 0; kch < in_c; kch++)
for (int ky = 0; ky < 3; ky++)
for (int kx = 0; kx < 3; kx++)
{
int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
for (int i = 0; i < 4; i++)
{
if (imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
*cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
else
*cur_col++ = 0.f;
}
}
}
}
// final 4 input
int col_i = out_xy & -4;
if (col_end3)
{
float* cur_col = col + col_i * kernel_size;
int cnt_y[4] = {col_i / out_w, (col_i + 1) / out_w, (col_i + 2) / out_w, (col_i + 3) / out_w};
int cnt_x[4] = {col_i - cnt_y[0] * out_w, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2,
col_i - cnt_y[3] * out_w + 3};
int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0,
cnt_x[3] * s_w - pad_w0};
int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0,
cnt_y[3] * s_h - pad_h0};
for (int kch = 0; kch < in_c; kch++)
{
for (int ky = 0; ky < 3; ky++)
{
for (int kx = 0; kx < 3; kx++)
{
int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
for (int i = 0; i < 4; i++)
{
/* i >= col_end3 lanes are zero padding for the partial tile */
if (i < col_end3 && imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
*cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
else
*cur_col++ = 0.f;
}
}
}
}
}
}
/* path 3: generic kernel size / stride / dilation */
else
{
int out_xy = out_w * out_h;
#pragma omp parallel for num_threads(num_thread)
for (int col_i = 0; col_i < out_xy - 3; col_i += 4)
{
int kernel_size = k_w * k_h * in_c;
int in_xy = in_w * in_h;
int col_end3 = out_xy & 3;
float* cur_col = col + col_i * kernel_size;
int cnt_y[4] = {col_i / out_w, (col_i + 1) / out_w, (col_i + 2) / out_w, (col_i + 3) / out_w};
int cnt_x[4] = {col_i - cnt_y[0] * out_w, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2,
col_i - cnt_y[3] * out_w + 3};
int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0,
cnt_x[3] * s_w - pad_w0};
int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0,
cnt_y[3] * s_h - pad_h0};
/* walk the dilated kernel window; ky/kx advance by the dilation */
for (int kch = 0; kch < in_c; kch++)
for (int ky = 0; ky < (k_h * d_h); ky += d_h)
for (int kx = 0; kx < (k_w * d_w); kx += d_w)
{
int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
for (int i = 0; i < 4; i++)
{
if (imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
*cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
else
*cur_col++ = 0.f;
}
}
}
/* final partial tile, zero-padded like the other paths */
int col_i = out_xy & -4;
float* cur_col;
int kernel_size = k_w * k_h * in_c;
int in_xy = in_w * in_h;
int col_end3 = out_xy & 3;
if (col_end3)
{
cur_col = col + col_i * kernel_size;
int cnt_y[4] = {col_i / out_w, (col_i + 1) / out_w, (col_i + 2) / out_w, (col_i + 3) / out_w};
int cnt_x[4] = {col_i - cnt_y[0] * out_w, col_i - cnt_y[1] * out_w + 1, col_i - cnt_y[2] * out_w + 2,
col_i - cnt_y[3] * out_w + 3};
int imx_start[4] = {cnt_x[0] * s_w - pad_w0, cnt_x[1] * s_w - pad_w0, cnt_x[2] * s_w - pad_w0,
cnt_x[3] * s_w - pad_w0};
int imy_start[4] = {cnt_y[0] * s_h - pad_h0, cnt_y[1] * s_h - pad_h0, cnt_y[2] * s_h - pad_h0,
cnt_y[3] * s_h - pad_h0};
for (int kch = 0; kch < in_c; kch++)
for (int ky = 0; ky < (k_h * d_h); ky += d_h)
for (int kx = 0; kx < (k_w * d_w); kx += d_w)
{
int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
for (int i = 0; i < 4; i++)
{
if (i < col_end3 && imx[i] >= 0 && imx[i] < in_w && imy[i] >= 0 && imy[i] < in_h)
*cur_col++ = *(input + in_xy * kch + in_w * imy[i] + imx[i]);
else
*cur_col++ = 0.f;
}
}
}
}
}
/*
 * GEMM over the output channels that form full PER_OUT_CHAN-wide groups
 * (channels [0, ch_end) with ch_end a multiple handled via ch_end/PER_OUT_CHAN
 * groups). Work is parallelised over channel groups; each group multiplies
 * the interleaved kernel block against 4 im2col columns at a time using the
 * assembly micro-kernel sgemm_4x16_rv64. ch_start is currently unused (the
 * caller always passes 0). The result layout from the micro-kernel's
 * scratch buffer is result[channel*4 + column].
 */
static void sgemm_set(float* col, float* kernel, float* biases, float* output, int kernel_size, int ch_start,
int ch_end, int output_xy, int activation, int num_thread, int cpu_affinity)
{
int nn_outch = ch_end / PER_OUT_CHAN;
int col_end3 = output_xy & 0x3; /* leftover columns in the last 4-wide tile */
if (col_end3)
{
#pragma omp parallel for num_threads(num_thread)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * PER_OUT_CHAN;
float* biasptr = biases ? (float*)(biases + p) : NULL;
float* kernel_tmp = (float*)(kernel + p * kernel_size);
float* output_tmp = (float*)(output + p * output_xy);
int col_line = 0;
/* full 4-column tiles written straight into the output */
for (col_line = 0; col_line + 3 < output_xy; col_line += 4)
{
float* col_tmp = (float*)(col + col_line * kernel_size);
sgemm_4x16_rv64(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy, activation, 0);
}
{
/* tail tile: compute into scratch (16 channels x 4 columns), then
 * copy only the col_end3 valid columns into the output */
float result[64];
float* col_tmp = (float*)(col + col_line * kernel_size);
sgemm_4x16_rv64(biasptr, col_tmp, kernel_tmp, kernel_size, result, 4, activation, 0);
for (int i = 0; i < 16; i++)
{
for (int j = 0; j < (col_end3); j++)
*(output + (p + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
}
}
}
else
{
/* output_xy divisible by 4: no tail handling needed */
#pragma omp parallel for num_threads(num_thread)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * PER_OUT_CHAN;
float* biasptr = biases ? (float*)(biases + p) : NULL;
float* kernel_tmp = (float*)(kernel + p * kernel_size);
float* output_tmp = (float*)(output + p * output_xy);
for (int col_line = 0; col_line + 3 < output_xy; col_line += 4)
{
float* col_tmp = (float*)(col + col_line * kernel_size);
sgemm_4x16_rv64(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy, activation, 0);
}
}
}
}
/*
 * GEMM over the remaining output channels [ch_start, ch_end) that did not
 * fill a PER_OUT_CHAN group, processed 4 channels x 4 columns at a time with
 * the sgemm_4x4_rv64 micro-kernel. Partial channel groups (ch_end % 4) and
 * partial column tiles (output_xy % 4) are computed into a 4x4 scratch
 * buffer and only the valid entries are copied out; scratch layout is
 * result[channel*4 + column]. `result` is OpenMP-private so each thread has
 * its own scratch.
 */
static void sgemm4x4(float* col, float* kernel, float* biases, float* output, int kernel_size, int ch_start, int ch_end,
int output_xy, int activation, int num_thread, int cpu_affinity)
{
float result[16];
int col_end3 = output_xy & 0x3;  /* leftover columns */
int kernel_end3 = ch_end & 0x3;  /* leftover channels */
#pragma omp parallel for num_threads(num_thread) private(result)
for (int kernel_num = ch_start; kernel_num < ((ch_end & -4) - 3); kernel_num += 4)
{
float* cur_biases = NULL;
float *cur_col, *cur_kernel, *cur_output;
int col_line;
if (biases)
cur_biases = (float*)(biases + kernel_num);
cur_kernel = (float*)(kernel + kernel_num * kernel_size);
cur_output = (float*)(output + kernel_num * output_xy);
/* full 4-column tiles written directly to the output */
for (col_line = 0; col_line < (output_xy & -4); col_line += 4)
{
cur_col = (float*)(col + col_line * kernel_size);
sgemm_4x4_rv64(cur_biases, cur_col, cur_kernel, kernel_size, cur_output + col_line, output_xy, activation, 0);
}
/* tail columns: compute into scratch, copy out valid entries */
if (col_end3)
{
cur_col = (float*)(col + col_line * kernel_size);
sgemm_4x4_rv64(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < (col_end3); j++)
*(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
}
}
/* final partial channel group (1..3 channels) */
if (kernel_end3)
{
int kernel_num = (ch_end & -4);
float* cur_biases = NULL;
if (biases)
cur_biases = (float*)(biases + kernel_num);
float* cur_kernel = (float*)(kernel + kernel_num * kernel_size);
#pragma omp parallel for num_threads(num_thread) private(result)
for (int col_line = 0; col_line < (output_xy & -4); col_line += 4)
{
float* cur_col = (float*)(col + col_line * kernel_size);
sgemm_4x4_rv64(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
for (int i = 0; i < kernel_end3; i++)
for (int j = 0; j < 4; j++)
*(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
/* corner tile: partial channels AND partial columns */
int col_line = output_xy & -4;
if (col_end3)
{
float* cur_col = (float*)(col + col_line * kernel_size);
sgemm_4x4_rv64(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, activation, 0);
for (int i = 0; i < (kernel_end3); i++)
{
for (int j = 0; j < (col_end3); j++)
*(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
}
}
}
}
/* check the conv wheather need to be using winograd */
/*
 * Decide whether this convolution qualifies for the Winograd fast path.
 * Only ungrouped 3x3, stride-1, dilation-1 convolutions on feature maps
 * large enough to amortise the transform cost qualify. Returns 1 when
 * Winograd may be used, 0 otherwise. (Currently unused — the Winograd
 * kernels for rv64 are not wired in yet.)
 */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    /* shape must be exactly: group 1, 3x3 kernel, stride 1, dilation 1 */
    if (param->group != 1 || param->kernel_h != 3 || param->kernel_w != 3)
        return 0;
    if (param->stride_h != 1 || param->stride_w != 1)
        return 0;
    if (param->dilation_h != 1 || param->dilation_w != 1)
        return 0;
    /* tiny inputs are cheaper with im2col+gemm */
    if (in_h < 7 && in_w < 7)
        return 0;
    if (in_h < 10 && in_w < 10 && param->output_channel < 16)
        return 0;
    return 1;
}
/*
* get the memory size for im2col of input tensor
*/
/*
 * Size in bytes of the im2col scratch buffer for one group: one column of
 * kernel_size elements per output pixel, with the pixel count rounded up to
 * a multiple of 4 (the gemm kernels consume 4-wide tiles), plus 128 slack
 * bytes.
 */
int conv_hcl_get_shared_mem_size_rv64(struct tensor* input, struct tensor* output, struct conv_param* param)
{
    const int group = param->group;
    const int input_chan = param->input_channel / group;
    const int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    const int elem_size = input->elem_size; /* uint8/int8 is 1 byte, fp32 is 4 bytes */
    int out_cstep = output->dims[2] * output->dims[3]; /* output_h * output_w */

    out_cstep = (out_cstep + 3) / 4 * 4; /* round spatial size up to a 4-wide tile */
    return elem_size * kernel_size * out_cstep + 128;
}
/*
* get the memory size for im2col + sgemm of kernel tensor interleave
*/
/*
 * Size in bytes of the interleaved-kernel buffer: per group, the channel
 * count is rounded up to a multiple of 4 to match interleave_kernel's
 * zero-padded layout, plus 128 slack bytes.
 */
static int get_private_mem_size(struct tensor* filter, struct conv_param* param)
{
    const int group = param->group;
    const int out_chan = filter->dims[0] / group;
    const int out_chan_align4 = (out_chan + 3) / 4 * 4;
    const int kernel_size = filter->dims[1] * filter->dims[2] * filter->dims[3];

    return kernel_size * filter->elem_size * out_chan_align4 * group + 128;
}
/*
 * Register a caller-owned im2col scratch buffer. Marking it external makes
 * conv_hcl_prerun skip its own allocation and conv_hcl_postrun skip the
 * free — the caller keeps ownership. Always returns 0.
 */
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    priv_info->external_im2col_mem = 1;
    return 0;
}
/*
 * Pack4 shared-memory hook. NOTE(review): this implementation ignores `mem`
 * and `mem_size` and clears the pack4 fields instead of storing them —
 * presumably a deliberate stub because the rv64 backend has no pack4 path
 * (conv_hcl_get_shared_pack4_mem_size also returns 0); confirm before
 * "fixing" it to store the arguments.
 */
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
priv_info->external_im2col_pack4_mem = 0;
priv_info->im2col_buffer_pack4 = NULL;
priv_info->im2col_buffer_pack4_size = 0;
return 0;
}
/*
 * The rv64 backend has no pack4 implementation, so it requests no pack4
 * scratch memory.
 */
int conv_hcl_get_shared_pack4_mem_size(struct tensor* filter, struct tensor* output, struct conv_param* param)
{
return 0;
}
int conv_hcl_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor,
struct conv_priv_info* priv_info, struct conv_param* param)
{
int in_c = input_tensor->dims[1];
int in_h = input_tensor->dims[2];
int in_w = input_tensor->dims[3];
/* check winograd implement, only for conv3x3s1 */
// priv_info->winograd = winograd_support(param, in_h, in_w);
// if (priv_info->winograd)
// {
// if(in_c >= 256)
// // return wino_conv_hcl_prerun_1(input_tensor, filter_tensor, output_tensor, priv_info, param); // FIXME: add wino support
// else
// // return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param); // FIXME: add wino support
// }
/* alloc mem of im2col */
if (!priv_info->external_im2col_mem)
{
int mem_size = conv_hcl_get_shared_mem_size_rv64(input_tensor, output_tensor, param);
void* mem = sys_malloc(mem_size);
priv_info->im2col_buffer = mem;
priv_info->im2col_buffer_size = mem_size;
}
/* alloc mem of kernel interleave */
if (!priv_info->external_interleave_mem)
{
int mem_size = get_private_mem_size(filter_tensor, param);
void* mem = sys_malloc(mem_size);
priv_info->interleave_buffer = mem;
priv_info->interleave_buffer_size = mem_size;
}
/* kernel interleave */
interleave(filter_tensor, priv_info, param);
return 0;
}
/*
 * Release the buffers conv_hcl_prerun allocated. Buffers flagged as
 * external belong to the caller and are left alone. Always returns 0.
 */
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    /* FIXME: release Winograd resources here once wino support is added */
    if (priv_info->im2col_buffer != NULL && !priv_info->external_im2col_mem)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }
    if (priv_info->interleave_buffer != NULL && !priv_info->external_interleave_mem)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }
    return 0;
}
/*
 * Execute the convolution as im2col + GEMM, per batch image and per group:
 *   1) expand this group's input into the im2col scratch buffer;
 *   2) multiply the interleaved kernels against it — full PER_OUT_CHAN
 *      channel groups via sgemm_set, the remaining channels via sgemm4x4.
 * Bias add and activation are fused into the gemm micro-kernels. Expects
 * NCHW fp32 tensors and a priv_info prepared by conv_hcl_prerun.
 * Always returns 0.
 */
int conv_hcl_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor,
struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
int num_thread, int cpu_affinity)
{
/* param */
int group = param->group;
int kernel_h = param->kernel_h;
int kernel_w = param->kernel_w;
int stride_h = param->stride_h;
int stride_w = param->stride_w;
int dilation_h = param->dilation_h;
int dilation_w = param->dilation_w;
int pad_h0 = param->pad_h0;
int pad_h1 = param->pad_h1;
int pad_w0 = param->pad_w0;
int pad_w1 = param->pad_w1;
int act_type = param->activation;
int batch = input_tensor->dims[0];
int in_c = input_tensor->dims[1] / group; /* channels per group */
int in_h = input_tensor->dims[2];
int in_w = input_tensor->dims[3];
int input_size = in_c * in_h * in_w;         /* per-group input stride */
int kernel_size = in_c * kernel_h * kernel_w;
int input_image_size = input_tensor->dims[1] * input_tensor->dims[2] * input_tensor->dims[3];
// if (priv_info->winograd)
// {
// if(in_c >= 256)
// return wino_conv_hcl_run_1(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity); // FIXME: add wino support
// else
// return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity); // FIXME: add wino support
// }
int out_c = output_tensor->dims[1] / group;
int out_h = output_tensor->dims[2];
int out_w = output_tensor->dims[3];
int out_hw = out_h * out_w;
int output_size = out_c * out_h * out_w;     /* per-group output stride */
int out_c_align = ((out_c + 3) & -4);        /* matches the interleave padding */
int output_image_size = output_tensor->dims[1] * output_tensor->dims[2] * output_tensor->dims[3];
/* buffer addr */
float* input_buf = (float*)input_tensor->data;
float* output_buf = (float*)output_tensor->data;
float* biases_buf = NULL;
if (bias_tensor != NULL)
biases_buf = (float*)bias_tensor->data;
float* col_buf = (float*)priv_info->im2col_buffer;
float* interleave_buf = (float*)priv_info->interleave_buffer;
/* channel split: full 16-wide groups for sgemm_set, remainder for sgemm4x4 */
int sgemm_set_chan = out_c / PER_OUT_CHAN * PER_OUT_CHAN;
int sgemm_set_remain = out_c % PER_OUT_CHAN;
for (int n = 0; n < batch; n++) // batch size
{
for (int g = 0; g < group; g++)
{
/* im2col */
float* cur_input = input_buf + n * input_image_size + g * input_size;
im2col(cur_input, col_buf, in_c, in_w, in_h, kernel_w, kernel_h, stride_w, stride_h, dilation_w, dilation_h,
pad_w0, pad_w1, pad_h0, pad_h1, out_w, out_h, num_thread);
/* gemm */
float* cur_kernel = interleave_buf + g * kernel_size * out_c_align;
float* cur_output = output_buf + n * output_image_size + g * output_size;
float* cur_bias = biases_buf ? (biases_buf + g * out_c) : NULL;
sgemm_set(col_buf, cur_kernel, cur_bias, cur_output, kernel_size, 0, sgemm_set_chan, out_hw, act_type,
num_thread, cpu_affinity);
if (sgemm_set_remain)
sgemm4x4(col_buf, cur_kernel, cur_bias, cur_output, kernel_size, sgemm_set_chan, out_c, out_hw,
act_type, num_thread, cpu_affinity);
}
}
return 0;
}
|
rnn_impl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file rnn_impl.h
* \brief
* \author Shu Zhang
*/
#ifndef MXNET_OPERATOR_RNN_IMPL_H_
#define MXNET_OPERATOR_RNN_IMPL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "./math.h"
#include "./math_functions-inl.h"
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./linalg.h"
namespace mxnet {
namespace op {
namespace rnn_enum {
// Positional indices of the RNN operator's input tensors.
enum RNNOpInputs {kData, kParams, kState, kStateCell, kSequenceLength};
// Positional indices of the RNN operator's output tensors.
enum RNNOpOutputs {kOut, kStateOut, kStateCellOut};
// Supported cell types; kStateCell/kStateCellOut are only meaningful for kLstm.
enum RNNModeType {kRnnRelu, kRnnTanh, kLstm, kGru};
// Operator resource requests (temp workspace, cuDNN dropout descriptor space).
enum RNNOpResource {kTempSpace, kCuDNNDropoutDescSpace};
}
// Logistic sigmoid: maps x into (0, 1) via 1 / (1 + e^{-x}).
template<typename DType>
inline DType sigmoid(DType x) {
  return 1.0f / (exp(-x) + 1.0f);
}
// Rectified linear unit: max(x, 0).
// Fix: the previous body returned `static_cast<float>(x)` on the positive
// branch, silently narrowing double inputs to float precision. Keep the
// computation in DType throughout.
template<typename DType>
inline DType relu(DType x) {
  return x > static_cast<DType>(0) ? x : static_cast<DType>(0);
}
// Training forward pass for one direction of a single LSTM layer.
// Writes hidden outputs into y and records, per step, the cell state and the
// i/f/g/o gate activations into the reserve space `rs` for the backward pass.
//   ws : scratch — holds x*Wx^T for all T steps, then per-step h*Wh^T and h.
//   rs : reserve — c (T,N,H) followed by ifgo (T,N,H,4); when bid, offset by
//        T*N*H*7 so the reverse direction does not clobber the forward one.
//   bid: reverse direction of a bidirectional layer — iterate time backwards
//        and write into the second H-wide half of y's last axis.
//   T,N,I,H: sequence length, batch, input size, hidden size.
//   hy_ptr/cy_ptr receive the final step's h/c when state_outputs is set.
template<typename DType>
void LstmForwardTrainingSingleLayer(DType* ws,
DType* rs,
bool state_outputs,
bool bid,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
const Tensor<cpu, 2, DType> &cx,
const Tensor<cpu, 3, DType> &y,
DType* w_ptr,
DType* b_ptr,
DType* hy_ptr,
DType* cy_ptr) {
using namespace mshadow;
// Weights/biases packed as [input weights | hidden weights | biases x2].
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H));
const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H));
const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
const int offset = bid ? H : 0;
const DType alpha = 1.0;
const DType beta = 0.0;
const int cell_size = N * H;
// One large GEMM: precompute x_t * Wx^T for every timestep at once.
linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int i = 0; i < T; ++i) {
int t = bid ? T - 1 - i : i;
// h_{t-1} * Wh^T (initial state hx on the first iteration).
linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
#pragma omp parallel for num_threads(omp_threads)
for (int jk = 0; jk < cell_size; ++jk) {
int j = jk / H;
int k = jk % H;
// Standard LSTM gates: input, forget, candidate, output.
DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
DType ct = (i ? c[i-1][j][k] : cx[j][k]) * ft + it * gt;
DType ht = ot * tanh(ct);
h[j][k] = ht;
// reserve
y[t][j][k + offset] = ht;
c[i][j][k] = ct;
ifgo[i][j][k][0] = it;
ifgo[i][j][k][1] = ft;
ifgo[i][j][k][2] = gt;
ifgo[i][j][k][3] = ot;
if (i == T - 1 && state_outputs) {
hy_ptr[jk] = ht;
cy_ptr[jk] = ct;
}
}
}
}
// Training forward pass over a stacked (optionally bidirectional) LSTM.
// Layers are processed in order; each layer's output y (stored inside the
// reserve space) becomes the next layer's input. Inverted dropout with a
// rand_r-based mask is applied between layers (never after the last), and the
// mask is saved in `rs` so the backward pass can replay it.
//   L: layers, D: directions (1 or 2), T,N,I,H as usual.
//   ws: scratch, rs: reserve = [dropout masks | per-layer c/ifgo/y records].
template <typename DType>
void LstmForwardTraining(DType* ws,
DType* rs,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
const int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* b_ptr,
DType* y_ptr,
DType* hy_ptr,
DType* cy_ptr,
const float dropout) {
// Reserve layout: (L-1)*D*T*N*H dropout mask entries, then per-layer records.
DType* dropout_random = rs;
DType* rs2 = dropout_random + (L - 1) * D * T * N * H;
const int total_layers = D * L;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
const int b_size = 2 * H * 4;
// Per-layer reserve record size and offset of y within it.
const int r_size = D * T * N * H * 6;
const int y_offset = T * N * H * 5;
const int cell_size = N * H;
unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn)
int idx = 0; // state & cell state's idx;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int i = 0; i < L; ++i) {
// Layers after the first consume the previous layer's D*H-wide output.
const int input_size = i ? H * D : I;
const int w_size = (input_size + H) * H * 4;
Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
Tensor<cpu, 3, DType> y(rs2 + y_offset, Shape3(T, N, H * D));
LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, false, T, N, input_size, H, x,
hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
if (D == 2) {
// Reverse direction of the same layer, next weight/bias/state slices.
w_ptr += w_size;
b_ptr += b_size;
++idx;
if (state_outputs) {
hy_ptr += cell_size;
cy_ptr += cell_size;
}
LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, true, T, N, input_size, H, x,
hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
}
if (i != L - 1) {
w_ptr += w_size;
b_ptr += b_size;
if (dropout > 0.0f) {
// Inverted dropout on this layer's output; mask recorded for backward.
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < T * N * H * D; j++) {
int rand_data = rand_r(&seed_);
if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
dropout_random[i * T * N * H * D + j] = 0;
y.dptr_[j] = 0;
} else {
dropout_random[i * T * N * H * D + j] = 1.0f - dropout;
y.dptr_[j] = y.dptr_[j] / (1.0f - dropout);
}
}
}
// This layer's y becomes the next layer's input.
x_ptr = y.dptr_;
rs2 += r_size;
++idx;
if (state_outputs) {
hy_ptr += cell_size;
cy_ptr += cell_size;
}
}
}
// Copy the last layer's y (still inside the reserve space) to the output.
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * H * D; ++i) {
y_ptr[i] = (rs2 + y_offset)[i];
}
}
// Inference-only forward pass for one direction of a single LSTM layer.
// Unlike the training variant, nothing is reserved for backward: h and c are
// kept as single (N,H) buffers inside the workspace and overwritten per step.
//   ws: scratch — x*Wx^T for all steps, then per-step h*Wh^T, h, c.
//   bid: reverse direction; iterate time backwards and write the second
//        H-wide half of y's last axis.
template<typename DType>
void LstmForwardInferenceSingleLayer(DType* ws,
bool state_outputs,
bool bid,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
const Tensor<cpu, 2, DType> &cx,
const Tensor<cpu, 3, DType> &y,
DType* w_ptr,
DType* b_ptr,
DType* hy_ptr,
DType* cy_ptr) {
using namespace mshadow;
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4));
Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4));
const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H));
const int offset = bid ? H : 0;
const DType alpha = 1.0;
const DType beta = 0.0;
const int cell_size = N * H;
// Precompute x_t * Wx^T for all timesteps in one GEMM.
linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int i = 0; i < T; ++i) {
int t = bid ? T - 1 - i : i;
linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
#pragma omp parallel for num_threads(omp_threads)
for (int jk = 0; jk < cell_size; ++jk) {
int j = jk / H;
int k = jk % H;
DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt;
DType ht = ot * tanh(ct);
y[t][j][k + offset] = ht;
// At the final step, the states go straight to hy/cy; otherwise they
// feed the next iteration via h/c.
if (i == T - 1 && state_outputs) {
hy_ptr[jk] = ht;
cy_ptr[jk] = ct;
} else {
h[j][k] = ht;
c[j][k] = ct;
}
}
}
}
// Inference forward pass over a stacked (optionally bidirectional) LSTM.
// For D == 2 the per-layer output alternates between a temp buffer in `ws`
// and the real output y_ptr; `flag`'s parity-based start value guarantees the
// final layer lands in y_ptr. For D == 1 every layer writes y_ptr directly,
// each overwriting the previous (the last layer's result remains).
template <typename DType>
void LstmForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
const int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* b_ptr,
DType* y_ptr,
DType* hy_ptr,
DType* cy_ptr) {
const int total_layers = D * L;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
const int b_size = 2 * H * 4;
const int cell_size = N * H;
// Temp output area placed after the single-layer workspace region.
DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2;
DType* y_cur_ptr = y_ptr;
int idx = 0; // state & cell state's idx;
bool flag = L % 2 ? false : true;
for (int i = 0; i < L; ++i) {
const int input_size = i ? H * D : I;
const int w_size = (input_size + H) * H * 4;
// If bidirectional, need space to save current layer output y.
if (D == 2) {
y_cur_ptr = flag ? y_tmp_ptr : y_ptr;
flag = !flag;
}
Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, H * D));
LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H,
x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
// If bidirectional, then calculate the reverse direction's forward result.
if (D == 2) {
w_ptr += w_size;
b_ptr += b_size;
++idx;
if (state_outputs) {
hy_ptr += cell_size;
cy_ptr += cell_size;
}
LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H,
x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
}
// Don't need to move pointer in the last layer.
if (i != L - 1) {
w_ptr += w_size;
b_ptr += b_size;
x_ptr = y_cur_ptr;
++idx;
if (state_outputs) {
hy_ptr += cell_size;
cy_ptr += cell_size;
}
}
}
}
// Backward pass for one direction of a single LSTM layer.
// Consumes the cell states and i/f/g/o gate activations recorded in `rs` by
// LstmForwardTrainingSingleLayer and produces gradients w.r.t. data (dx),
// weights (dwx/dwh), biases (dbx/dbh) and initial states (dhx/dcx), honoring
// the req_* request types (kNullOp skips, kAddTo accumulates).
//   ws: scratch for per-gate gradients (difgo) plus dh/dc/htmp buffers.
//   bid: reverse direction — reserve offset T*N*H*7, y offset H, and dx is
//        accumulated (beta1) on top of the forward direction's result.
template <typename DType>
void LstmBackwardSingleLayer(DType* ws,
DType* rs,
DType* tmp_buf,
bool bid,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
const Tensor<cpu, 2, DType> &cx,
const Tensor<cpu, 3, DType> &y,
const Tensor<cpu, 3, DType> &dy,
const Tensor<cpu, 2, DType> &dx,
const Tensor<cpu, 2, DType> &dhx,
const Tensor<cpu, 2, DType> &dcx,
DType* dhy_ptr,
DType* dcy_ptr,
DType* w_ptr,
DType* dw_ptr,
DType* db_ptr,
int req_data,
int req_params,
int req_state,
int req_statecell) {
using namespace mshadow;
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I));
Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H));
Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4));
Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4));
// Saved forward activations: c (T,N,H) then ifgo (T,N,H,4).
DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
// Write-mode (non-AddTo) parameter gradients start from zero.
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * 4 * H; ++i) {
dwh.dptr_[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 4 * H; ++i) {
dbx.dptr_[i] = 0;
dbh.dptr_[i] = 0;
}
}
Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H));
Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H));
Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H));
Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H));
const int offset = bid ? H : 0;
const DType alpha = 1.0;
const DType beta0 = 0.0;
const DType beta1 = 1.0;
const DType beta2 = 2.0;
const int cell_size = N * H;
// Seed dh/dc with the incoming final-state gradients (or zero).
if (dhy_ptr != NULL) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dh.dptr_[i] = dhy_ptr[i];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dh.dptr_[i] = 0;
}
}
if (dcy_ptr != NULL) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dc.dptr_[i] = dcy_ptr[i];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dc.dptr_[i] = 0;
}
}
// Walk time in reverse relative to the forward direction.
for (int i = T - 1; i >= 0; --i) {
int t = bid ? T - 1 - i : i;
int tnext = bid ? t + 1 : t - 1;
// At i == 0 gradients flow into the initial-state outputs dhx/dcx.
const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx;
const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx;
const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx;
const Tensor<cpu, 2, DType>& cnext = i ? c[i - 1] : cx;
#pragma omp parallel for num_threads(omp_threads)
for (int jk = 0; jk < cell_size; ++jk) {
int j = jk / H;
int k = jk % H;
DType tc = tanh(c[i][j][k]);
DType it = ifgo[i][j][k][0];
DType ft = ifgo[i][j][k][1];
DType gt = ifgo[i][j][k][2];
DType ot = ifgo[i][j][k][3];
dh[j][k] += dy[t][j][k + offset];
dc[j][k] += dh[j][k] * ot * (1 - tc * tc);
// Pre-activation gate gradients (sigmoid'/tanh' folded in).
difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it);
difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft);
difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt);
difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot);
if (req_statecell != kNullOp || i > 0) {
dcnext[j][k] = dc[j][k] * ft;
}
if (i) {
htmp[j][k] = y[tnext][j][k + offset];
}
}
Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4));
if (req_state != kNullOp || i > 0) {
linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false);
}
if (req_params != kNullOp) {
if (req_params != kAddTo) {
linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false);
} else {
// NOTE(review): the kAddTo path uses beta2 == 2.0 here and pairs
// with the doubled bias update below — presumably intentional for
// gradient accumulation semantics; confirm against upstream MXNet
// before changing.
linalg_gemm(dyh, hnext, dwh, alpha, beta2, true, false);
// generate dwx every time step for AddTo
Tensor<cpu, 2, DType> x_t(x.dptr_ + i * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> dyx_t(difgo.dptr_ + i * N * H * 4, Shape2(N, H * 4));
linalg_gemm(dyx_t, x_t, dwx, alpha, beta2, true, false);
}
}
}
Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4));
if (req_data != kNullOp) {
// Reverse direction accumulates onto the forward direction's dx.
linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false);
}
if (req_params != kNullOp && req_params != kAddTo) {
linalg_gemm(dyx, x, dwx, alpha, beta0, true, false);
}
const int row = T * N;
const int col = H * 4;
// Bias gradients: column sums of dyx; dbh mirrors dbx since both biases
// enter the gates additively.
if (req_params != kNullOp) {
if (req_params != kAddTo) {
for (int i = 0; i < row; ++i) {
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < col; ++j) {
dbx[j] += dyx[i][j];
dbh[j] = dbx[j];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf, Shape2(col, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + col * T, Shape2(col, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < col * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < col; ++j) {
for (int i = 0; i < N; ++i) {
tmp_dbx[j][t] += dyx[t * N + i][j];
tmp_dbh[j][t] = tmp_dbx[j][t];
}
}
// NOTE(review): "dbx[j] += tmp + dbx[j]" doubles the running value
// each step; looks deliberate in tandem with beta2 above — verify.
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < col; ++j) {
dbx[j] += tmp_dbx[j][t] + dbx[j];
dbh[j] += tmp_dbh[j][t] + dbh[j];
}
}
}
}
}
// Backward pass over the stacked LSTM: iterates layers from last to first,
// invoking LstmBackwardSingleLayer per direction. Each layer's dx (written to
// a temp buffer for all but layer 0) becomes dy for the layer below. Replays
// the saved dropout masks to scale inter-layer data gradients.
template <typename DType>
void LstmBackward(DType* ws,
DType* rs,
const int L,
const int D,
const int T,
const int N,
const int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* dcy_ptr,
DType* dx_ptr,
DType* dhx_ptr,
DType* dcx_ptr,
DType* dw_ptr,
DType* db_ptr,
int req_data,
int req_params,
int req_state,
int req_statecell,
const float dropout) {
// Masks sit at the start of rs; per-layer records follow (same layout as
// the forward pass). dropout_random starts at the end of the mask region
// and is walked backwards as layers are processed.
DType* dropout_random = rs + (L - 1) * D * T * N * H;
DType* rs2 = rs + (L - 1) * D * T * N * H;
DType* tmp_buf = ws;
DType* ws2 = tmp_buf + 8 * T * H;
const int total_layers = D * L;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H));
const int b_size = 2 * H * 4;
const int r_size = D * T * N * H * 6;
const int y_offset = T * N * H * 5;
const int w_size1 = (I + H) * H * 4; // first layer
const int w_size2 = (D * H + H) * H * 4; // other layers
const int cell_size = N * H;
DType* dy_tmp_ptr = ws2 + T * cell_size * 4 + cell_size * 3;
for (int i = L - 1; i >= 0; --i) {
const int input_size = i ? H * D : I;
const int w_size = i ? w_size2 : w_size1;
int idx = i * D;
DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr;
DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr;
DType* db_cur_ptr = db_ptr + i * b_size * D;
DType* rs_cur_ptr = rs2 + i * r_size;
DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : NULL;
DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : NULL;
Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D));
Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D));
// Layer input x comes from the previous layer's saved y (or the real
// input for layer 0); dx goes to a temp buffer except for layer 0.
Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size));
Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size));
LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, false, T, N, input_size, H,
x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
req_data, req_params, req_state, req_statecell);
if (D == 2) {
w_cur_ptr += w_size;
dw_cur_ptr += w_size;
db_cur_ptr += b_size;
++idx;
dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : NULL;
dcy_cur_ptr = dcy_ptr ? dcy_cur_ptr + cell_size : NULL;
LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, true, T, N, input_size, H,
x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
req_data, req_params, req_state, req_statecell);
}
// Replay the forward dropout mask on the inter-layer data gradient.
if (dropout > 0.0f && i > 0 && req_data != kNullOp) {
dropout_random = dropout_random - T * N * D * H;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < T * N * D * H; j++) {
if (dropout_random[j] == 0) {
dx.dptr_[j] = 0;
} else {
dx.dptr_[j] = dx.dptr_[j] / (1.0f - dropout);
}
}
}
dy_ptr = dx.dptr_;
}
}
// Inference-only forward pass for one GRU layer; handles the reverse
// direction inline when D == 2. Gate activations (r/z/n) live only in the
// workspace — nothing is reserved for a backward pass.
//   ws: gemmC1 (x*Wx^T for all steps, both directions), gemmC2 (per-step
//       h*Wh^T), then rt/zt/nt scratch.
//   y_ptr: (T, N, D*H) output; the reverse direction fills the second H half.
template<typename DType>
void GruForwardInferenceSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* y_ptr,
DType* hy_ptr) {
// ht walks forward through y; back_ht walks backward from the last step.
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, 3 * H]
DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H
DType* rt = gemmC2 + N * 3 * H;
DType* zt = rt + N * H;
DType* nt = zt + N * H;
// Reverse-direction weights/biases follow the forward ones in memory.
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_bx_ptr = (bx_ptr != NULL)? bx_ptr + 3 * H * 2 : NULL;
DType* back_bh_ptr = (bh_ptr != NULL)? bh_ptr + 3 * H * 2: NULL;
DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
// Seed step 0 (and step T-1 for the reverse direction) with hx.
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
// x * wx.T : [T * N, I] * [I, 3 * H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (int t = 0; t < T; t++) {
// perform the first direction, X * wx and H * wh for each step
// ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
// Transpose via tmp_buf to peel this direction's H columns out of the
// interleaved (N, D*H) output row.
Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
gemmC1_t = gemmC1 + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
// GRU gates: reset, update, candidate (reset gates the hidden term).
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
+ bx[0][j] + bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
+ bx[1][j] + bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
zt[i * H + j] * ht_1[i * D * H + j];
}
}
ht_1 = ht;
ht = ht + D * H * N;
// perform the second direction
if (D == 2) {
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
gemmC2[ztb + j] + back_bx[1][j]+ back_bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
+ rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
+ zt[i * H + j] * back_ht_1[i * D * H + j];
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
// copy last state to hy, from(N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
// Inference forward pass over a stacked GRU. Per-layer output ping-pongs
// between y_tmp (in ws) and y_ptr; the (L + l) % 2 test guarantees the final
// layer writes directly into y_ptr. Note `I` is mutated to D*H after layer 0
// since subsequent layers consume the previous layer's D*H-wide output.
template <typename DType>
void GruForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr) {
// Packed weights: all wx/wh blocks for every layer/direction first, then
// all bias blocks; bx is located past the weight region.
DType* wx = w_ptr;
DType* wh = wx + I * H * 3;
DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
+ (L - 1) * ((D + 1) * H) * H * 3 * D;
DType* bh = bx + H * 3;
DType* y_tmp = ws;
DType* y_l = x_ptr;
DType* tmp_buf = y_tmp + D * T * N * H;
DType* ws2 = y_tmp + D * T * N * H + D * H * N;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
for (int l = 0; l < L; l++) {
Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
// Alternate output buffers so layer L-1 lands in y_ptr.
if ((L + l) % 2) {
y_l = y_ptr;
} else {
y_l = y_tmp;
}
Tensor<cpu, 2, DType> hx_l = hx[D * l];
GruForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l);
// Advance to the next layer's state/bias/weight slices.
hy_l = hy_l + D * N * H;
bx_l = bx_l + 3 * H * D * 2;
bh_l = bh_l + 3 * H * D * 2;
wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * 3 * H;
}
}
// Training forward pass for one GRU layer (both directions when D == 2).
// Identical math to the inference variant, but records the per-step gate
// activations (gateR/gateZ/gateN) and the pre-reset hidden candidate term
// Mnh = h_{t-1}*Wh_n^T + bh_n, all needed by the backward pass.
//   ws: gemmC1 (x*Wx^T for all steps/directions) then gemmC2 (per-step).
//   Reverse-direction records live T*N*H past their forward counterparts.
template<typename DType>
void GruForwardTrainingSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* gateR,
DType* gateZ,
DType* gateN,
DType* Mnh,
DType* y_ptr,
DType* hy_ptr) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, 3 * H]
DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H
// Gate pointers advance per step into the reserve arrays (unlike the
// inference variant, which reuses one scratch slot).
DType* rt = gateR;
DType* zt = gateZ;
DType* nt = gateN;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_bx_ptr = (bx_ptr != NULL)? bx_ptr + 3 * H * 2 : NULL;
DType* back_bh_ptr = (bh_ptr != NULL)? bh_ptr + 3 * H * 2 : NULL;
DType* back_gateR = gateR + T * N * H;
DType* back_gateZ = gateZ + T * N * H;
DType* back_gateN = gateN + T * N * H;
DType* back_Mnh = Mnh + T * N * H;
DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
// Seed step 0 (and step T-1 for the reverse direction) with hx.
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
// x * wx.T : [T * N, I] * [I, 3 * H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (int t = 0; t < T; t++) {
// perform the first direction, X * wx and H * wh for each step
// ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
rt = gateR + t * N * H;
zt = gateZ + t * N * H;
nt = gateN + t * N * H;
gemmC1_t = gemmC1 + t * N * 3 * H;
DType* Mnht = Mnh + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
// Save the pre-reset candidate hidden term for backward.
Mnht[i * H + j] = gemmC2[ntb + j] + bh[2][j];
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
+ bx[0][j] + bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
+ bx[1][j] + bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
zt[i * H + j] * ht_1[i * D * H + j];
}
}
ht_1 = ht;
ht = ht + D * H * N;
// perform the second direction
if (D == 2) {
rt = back_gateR + (T - 1 - t) * N * H;
zt = back_gateZ + (T - 1 - t) * N * H;
nt = back_gateN + (T - 1 - t) * N * H;
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
DType* back_Mnht = back_Mnh + (T - 1 - t) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
back_Mnht[i * H + j] = gemmC2[ntb + j] + back_bh[2][j];
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
+ rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
+ zt[i * H + j] * back_ht_1[i * D * H + j];
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
// copy last state to hy, from(N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
// Forward training pass for an L-layer (optionally bidirectional, D == 2) GRU
// on CPU. Walks the stacked layers bottom-up, delegating each layer to
// GruForwardTrainingSingleLayer(), and stores per-step gate activations
// (r, z, n) plus the Mnh terms in the reserved space `rs` so the backward
// pass can reuse them without recomputation.
//
// Parameters:
//   ws      - scratch workspace; partitioned below into tmp_buf / ws2
//   rs      - reserved space, laid out as the consecutive blocks
//             [gateR | gateZ | gateN | y | Mnh | dropout_random]
//   state_outputs - forwarded to the single-layer kernel; when true, hy is
//             filled with the final hidden state of every layer/direction
//   L, D, T, N, H - layer count, direction count, seq length, batch, hidden
//   I       - input feature size of layer 0; mutated to D * H after layer 0
//             since deeper layers consume the previous layer's output
//   x_ptr   - input sequence, logically [T * N, I]
//   hx_ptr  - initial hidden states, [D * L, N, H]
//   w_ptr   - packed weights then biases for all layers/directions; the bx
//             offset below skips every wx/wh block of every layer
//   y_ptr   - output sequence, [T, N, D * H]
//   hy_ptr  - final hidden states, [D * L, N, H] (written via hy_l)
//   dropout - inverted-dropout probability applied to the *input* of layers
//             l > 0 (never to layer 0's input or to any output)
template <typename DType>
void GruForwardTraining(DType* ws,
                        DType* rs,
                        bool state_outputs,
                        const int L,
                        const int D,
                        const int T,
                        const int N,
                        int I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* y_ptr,
                        DType* hy_ptr,
                        const float dropout) {
  // Weight layout: all wx/wh matrices for every layer and direction come
  // first; bx/bh pairs follow. The (D - 1) term accounts for the reverse
  // direction of layer 0, the (L - 1) term for the deeper layers whose input
  // width is D * H.
  DType* wx = w_ptr;
  DType* wh = wx + I * H * 3;
  DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  DType* bh = bx + H * 3;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  // Carve the reserved space into its per-quantity blocks (see layout above).
  DType* gateR_l = rs;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* dropout_random = Mnh_l + L * D * T * N * H;
  // tmp_buf / ws2 live past the reserved blocks; ws2 is the per-layer
  // workspace handed to the single-layer kernel.
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* ws2 = tmp_buf + D * N * H;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  // Layer 0 reads the external input; deeper layers read the previous
  // layer's output (y_tmp is re-pointed inside the loop).
  DType* y_tmp = x_ptr;
  unsigned int seed_ = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  for (int l = 0; l < L; l++) {
    if (l != 0) {
      y_tmp = y_l;
      y_l = y_l + T * N * H * D;
    }
    if (dropout > 0.0f && l > 0) {
      const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
      // NOTE(review): seed_ is shared by all OMP threads, so concurrent
      // rand_r(&seed_) calls race on it. The mask is still saved in
      // dropout_random for the backward pass, but the randomness is not
      // reproducible across runs/thread counts — confirm this is acceptable.
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * I; i++) {
        int rand_data = rand_r(&seed_);
        if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
          // Dropped unit: zero the input and record a 0 mask entry.
          dropout_random[(l - 1) * T * N * I + i] = 0;
          y_tmp[i] = 0;
        } else {
          // Kept unit: inverted dropout — scale up by 1/(1-p) at train time.
          dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
          y_tmp[i] = y_tmp[i] / (1.0f - dropout);
        }
      }
    }
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    GruForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                         x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
                                         gateR_l, gateZ_l, gateN_l, Mnh_l, y_l, hy_l);
    // Advance every per-layer cursor to the next layer's block.
    gateR_l = gateR_l + T * D * N * H;
    gateZ_l = gateZ_l + T * D * N * H;
    gateN_l = gateN_l + T * D * N * H;
    Mnh_l = Mnh_l + T * D * N * H;
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + 3 * H * D * 2;
    bh_l = bh_l + 3 * H * D * 2;
    wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
    if (l == 0) {
      // From layer 1 on, the input is the previous layer's [*, D * H] output.
      I = D * H;
    }
    wh_l = wx_l + I * 3 * H;
  }
  // y_l now points at the top layer's output inside rs; publish it to y_ptr.
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = y_l[i];
  }
}
// Backward pass for one GRU layer (both directions when D == 2), CPU path.
// Consumes the activations saved by the forward training pass (gateR/Z/N,
// Mnh, y) and produces gradients for the input (dx), initial state (dhx),
// weights (dwx/dwh) and biases (dbx/dbh), honoring the MXNet write/add/null
// request codes in req_data / req_params / req_state.
//
// Workspace `ws` is carved into: dar [T, N, 3H] (gradient w.r.t. the
// hidden-side pre-activations, i.e. what multiplies wh), da [T, N, 3H]
// (gradient w.r.t. the input-side pre-activations, i.e. what multiplies wx),
// dht1 [D, N, H] (running gradient of h_{t-1}), and hx_ [N, D, H] (the
// initial state repacked into interleaved-direction layout).
template <typename DType>
void GruBackwardSingleLayer(DType* ws,
                            DType* tmp_buf,
                            const int D,
                            const int T,
                            const int N,
                            const int I,
                            const int H,
                            const Tensor<cpu, 2, DType> &x,
                            const Tensor<cpu, 2, DType> &hx,
                            DType* wx_ptr,
                            DType* wh_ptr,
                            DType* y_ptr,
                            DType* dy_ptr,
                            DType* dhy_ptr,
                            DType* gateR,
                            DType* gateZ,
                            DType* gateN,
                            DType* Mnh,
                            DType* dx,
                            DType* dhx,
                            DType* dwx,
                            DType* dwh,
                            DType* dbx,
                            DType* dbh,
                            int req_data,
                            int req_params,
                            int req_state) {
  DType* dyt;
  DType* ht1;  // [N, D, H]
  DType* rt;
  DType* zt;
  DType* nt;
  DType* dat;
  DType* dart;
  DType* dar = ws;                  // [T, N, 3 * H]
  DType* da = dar + T * N * 3 * H;  // [T, N, 3 * H]
  DType* dht1 = da + T * N * 3 * H;  // [D, N, H]
  DType* hx_ = dht1 + D * N * H;     // [N, D, H]
  DType* Mnht = Mnh;
  DType* back_ht1;
  DType* back_dht1 = dht1 + N * H;  // [N, H]
  // Reverse-direction blocks sit immediately after the forward-direction
  // blocks in each saved-activation / weight / gradient buffer.
  DType* back_Mnht = Mnh + T * N * H;
  DType* back_gateR = gateR + T * N * H;
  DType* back_gateZ = gateZ + T * N * H;
  DType* back_gateN = gateN + T * N * H;
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_dwx = dwx + I * 3 * H + H * 3 * H;
  DType* back_dwh = dwh + I * 3 * H + H * 3 * H;
  DType* back_dbx = dbx + 3 * H * 2;
  DType* back_dbh = dbh + 3 * H * 2;
  DType alpha = 1.0;
  DType beta = 0.0;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // kWriteTo / kWriteInplace: the weight/bias gradients are accumulated with
  // += below, so they must start from zero. kAddTo intentionally skips this.
  if (req_params != kNullOp && req_params != kAddTo) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * H * 3 * H; ++i) {
      dwh[i] = 0;
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * 3 * H; ++i) {
      dbx[i] = 0;
      dbh[i] = 0;
    }
  }
  // Seed dht1 with the incoming gradient of the last hidden state (or zero).
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < N * H; ++i) {
    if (dhy_ptr) {
      dht1[i] = dhy_ptr[i];
    } else {
      dht1[i] = 0;
    }
  }
  // Repack hx [N, H] (per direction) into the interleaved [N, D, H] layout
  // that matches y's layout, so t == 0 can index ht1 uniformly.
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < H; ++j) {
      hx_[i * D * H + j] = hx[i][j];
    }
  }
  if (D == 2) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N * H; ++i) {
      if (dhy_ptr) {
        back_dht1[i] = dhy_ptr[N * H + i];
      } else {
        back_dht1[i] = 0;
      }
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        hx_[i * D * H + H + j] = hx[N + i][j];
      }
    }
  }
  // ---- Forward direction: walk time backwards (BPTT). ----
  for (int t = T - 1; t >= 0; --t) {
    if (t) {
      ht1 = y_ptr + (t - 1) * N * D * H;
    } else {
      ht1 = hx_;  // at t == 0 the "previous" state is the initial state
    }
    // add dy[T, N, D, H] to dhy[D, N, H]
    dyt = dy_ptr + t * N * D * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        dht1[i * H + j] += dyt[i * D * H + j];
      }
    }
    rt = gateR + t * N * H;
    zt = gateZ + t * N * H;
    nt = gateN + t * N * H;
    Mnht = Mnh + t * N * H;
    dat = da + t * N * 3 * H;
    dart = dar + t * N * 3 * H;
    // GRU gate gradients. Per (i, j): gate order within the 3H block is
    // [r | z | n]. dat holds the input-side pre-activation grads, dart the
    // hidden-side ones; they differ only in the n slot, where the hidden
    // path is additionally gated by r (n = tanh(ax_n + r * Mnh)).
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int nid = i * 3 * H + 2 * H + j;
        int zid = i * 3 * H + H + j;
        int rid = i * 3 * H + j;
        int id = i * H + j;
        dat[nid] = dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
        dart[zid] = dat[zid] = dht1[id] * (ht1[i * D * H + j] - nt[id]) *
            zt[id] * (1 - zt[id]);
        dart[rid] = dat[rid] = dat[nid] * Mnht[id] * rt[id] *
            (1 - rt[id]);
        dart[nid] = dat[nid] * rt[id];
        // Direct h_t -> h_{t-1} path through the update gate.
        dht1[id] = dht1[id] * zt[id];
      }
    }
    if (req_params != kNullOp) {
      alpha = 1.0;
      beta = 1.0;
      // dht1 = dart * wh [N, H] = [N, 3 * H] * [3 * H, H]
      Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
      Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
      linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
      if (req_params == kAddTo) {
        // NOTE(review): beta = 2.0 makes this dwx = dat^T * x + 2 * dwx each
        // timestep, i.e. the existing accumulator is doubled T times. This
        // mirrors the bias handling below but looks suspect for kAddTo
        // semantics (expected beta = 1.0) — confirm against the op contract.
        beta = 2.0;
        // dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
        Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
        Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
        linalg_gemm(d_dat, d_xt, d_dwx, alpha, beta, true, false);
      }
      // dwh = dart.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
      Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
      Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(3 * H, H));
      // Transpose ht1 from [N, D*H] into [D, H, N] via tmp_buf so the gemm
      // can pick out this direction's slice (d_ht1_tmp[0]).
      Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
      linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
    }
  }
  if (req_params != kNullOp) {
    // dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
    if (req_params != kAddTo) {
      // Bias grads: reduce da/dar over all timesteps and batch entries.
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < 3 * H; ++i) {
        for (int j = 0; j < N * T; ++j) {
          dbx[i] += da[j * 3 * H + i];
          dbh[i] += dar[j * 3 * H + i];
        }
      }
    } else {
      // kAddTo: reduce into per-timestep temporaries first, then fold into
      // the existing accumulators.
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < H * T * 3; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (int t = T - 1; t >= 0; --t) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          for (int j = 0; j < N; ++j) {
            tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
            tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
          }
        }
        // NOTE(review): "dbx[i] += tmp + dbx[i]" computes dbx = 2*dbx + tmp,
        // doubling the running value every timestep. Consistent with the
        // beta = 2.0 gemms above, but almost certainly not plain kAddTo
        // accumulation — flagged for confirmation, not silently changed.
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          dbx[i] += tmp_dbx[i][t] + dbx[i];
          dbh[i] += tmp_dbh[i][t] + dbh[i];
        }
      }
    }
  }
  alpha = 1.0;
  beta = 0.0;
  // dx = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
  Tensor<cpu, 2, DType> d_da(da, Shape2(T * N, 3 * H));
  if (req_data != kNullOp) {
    Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
    linalg_gemm(d_da, wx, d_dx, alpha, beta, false, false);
  }
  // dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
  if (req_params != kNullOp && req_params != kAddTo) {
    Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
    linalg_gemm(d_da, x, d_dwx, alpha, beta, true, false);
  }
  // ---- Reverse direction (D == 2): walk time forwards, since the reverse
  // RNN ran from t = T-1 down to 0. The da/dar buffers are reused. ----
  if (D == 2) {
    for (int t = 0; t < T; ++t) {
      if (t == T-1) {
        back_ht1 = hx_;  // the reverse direction starts at t = T-1
      } else {
        back_ht1 = y_ptr + (t + 1) * N * D * H;
      }
      // add dy[T, N, D, H] to dhy[D, N, H]
      dyt = dy_ptr + t * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          back_dht1[i * H + j] += dyt[i * D * H + H + j];
        }
      }
      rt = back_gateR + t * N * H;
      zt = back_gateZ + t * N * H;
      nt = back_gateN + t * N * H;
      back_Mnht = Mnh + (T + t) * N * H;
      dat = da + t * N * 3 * H;
      dart = dar + t * N * 3 * H;
      // Same gate-gradient math as the forward direction, reading the
      // reverse-direction slice (+H column offset) of ht1 and dy.
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int nid = i * 3 * H + 2 * H + j;
          int zid = i * 3 * H + H + j;
          int rid = i * 3 * H + j;
          int id = i * H + j;
          dat[nid] = back_dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
          dart[zid] = dat[zid] = back_dht1[id] * (back_ht1[i * D * H + H + j] -
              nt[id]) * zt[id] * (1 - zt[id]);
          dart[rid] = dat[rid] = dat[nid] * back_Mnht[id] * rt[id] *
              (1 - rt[id]);
          dart[nid] = dat[nid] * rt[id];
          back_dht1[id] = back_dht1[id] * zt[id];
        }
      }
      if (req_params != kNullOp) {
        alpha = 1.0;
        beta = 1.0;
        // dht1 = da * wh [N, H] = [N, 3 * H] * [3 * H, H]
        Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
        Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
        linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
        // dwh = da.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
        Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(3 * H, H));
        // +H selects the reverse-direction half of the interleaved state.
        Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
        Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
            (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
        d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
        if (req_params == kAddTo) {
          // NOTE(review): same suspicious beta = 2.0 doubling as the forward
          // direction — see the note there.
          beta = 2.0;
          // dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
          Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
          Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
          Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
          linalg_gemm(d_dat, d_xt, d_back_dwx, alpha, beta, true, false);
        }
        linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
      }
    }
    if (req_params != kNullOp) {
      // dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
      if (req_params != kAddTo) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          for (int j = 0; j < N * T; ++j) {
            back_dbx[i] += da[j * 3 * H + i];
            back_dbh[i] += dar[j * 3 * H + i];
          }
        }
      } else {
        const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
        const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H * T * 3; ++i) {
          tmp_dbx.dptr_[i] = 0;
          tmp_dbh.dptr_[i] = 0;
        }
        for (int t = T - 1; t >= 0; --t) {
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < 3 * H; ++i) {
            for (int j = 0; j < N; ++j) {
              tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
              tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
            }
          }
          // NOTE(review): same "x += tmp + x" doubling pattern as the
          // forward-direction bias accumulation — see the note there.
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < 3 * H; ++i) {
            back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
            back_dbh[i] += tmp_dbh[i][t] + back_dbh[i];
          }
        }
      }
    }
    alpha = 1.0;
    // beta = 1.0 adds the reverse-direction contribution onto the
    // forward-direction dx already written above.
    beta = 1.0;
    // dxt = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
    Tensor<cpu, 2, DType> d_da2(da, Shape2(T * N, 3 * H));
    if (req_data != kNullOp) {
      Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
      linalg_gemm(d_da2, back_wx, d_dx, alpha, beta, false, false);
    }
    alpha = 1.0;
    beta = 0.0;
    // dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
    if (req_params != kNullOp && req_params != kAddTo) {
      Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
      linalg_gemm(d_da2, x, d_back_dwx, alpha, beta, true, false);
    }
  }
  // Publish the accumulated h_0 gradient (both directions when D == 2).
  if (req_state != kNullOp) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N * H * D; ++i) {
      dhx[i] = dht1[i];
    }
  }
}
// Backward pass for the full L-layer GRU stack (CPU path). Iterates layers
// top-down, calling GruBackwardSingleLayer() per layer; each layer's input
// gradient (dx_l) becomes the next-lower layer's output gradient (dy_l).
// Reads the activations the forward training pass saved in `rs` and honors
// the req_* request codes (write / add / null) per output.
//
// Parameters mirror GruForwardTraining; additionally:
//   dy_ptr  - gradient of the stack output [T, N, D * H] (overwritten in
//             place with the propagated gradient between layers)
//   dhy_ptr - gradient of the final hidden states, may be NULL
//   dx_ptr / dhx_ptr / dw_ptr - gradient outputs for input, initial state,
//             and packed parameters respectively
template <typename DType>
void GruBackward(DType* ws,
                 DType* rs,
                 const int L,
                 const int D,
                 const int T,
                 const int N,
                 int I,
                 const int H,
                 DType* x_ptr,
                 DType* hx_ptr,
                 DType* w_ptr,
                 DType* dy_ptr,
                 DType* dhy_ptr,
                 DType* dx_ptr,
                 DType* dhx_ptr,
                 DType* dw_ptr,
                 int req_data,
                 int req_params,
                 int req_state,
                 const float dropout) {
  // Mirror the forward pass's packed-weight layout to locate the bias block.
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  DType* dwh = dwx + I * H * 3;
  DType* dbx = dwh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  // Reserved-space blocks, positioned at the TOP layer's slice (l = L - 1);
  // cursors are walked downward at the end of each loop iteration.
  DType* gateR_l = rs + (L - 1) * T * D * N * H;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* dropout_random = Mnh_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  // dx_l is a scratch input-gradient buffer for intermediate layers; the
  // bottom layer writes straight into dx_ptr instead.
  DType* dx_l = tmp_buf + T * N * D * H + 3 * H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  // Top layer's wx/wh: skip layer 0 (input width I) and the L - 2 middle
  // layers (input width D * H), per direction.
  DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H * 3;
  } else {
    wh_l = wh_l + (D * H) * H * 3;
  }
  DType* dhy_l = NULL;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;
  DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* dwh_l = NULL;
  if (L == 1) {
    dwh_l = dwx_l + I * H * 3;
  } else {
    dwh_l = dwx_l + (D * H) * H * 3;
  }
  DType* dbx_l = dbx + (L - 1) * D * 3 * H * 2;
  DType* dbh_l = dbx_l + 3 * H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  int inputsize = I;  // remember layer 0's true input width
  // Input of the top layer = output of the layer below it (saved in rs).
  DType* y_tmp = y_l - T * N * H * D;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = L - 1; l >= 0; --l) {
    if (l == 0) {
      I = inputsize;
      y_tmp = x_ptr;   // layer 0 consumed the external input
      dx_l = dx_ptr;   // and its input gradient is the op's dx output
    } else {
      I = D * H;
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    GruBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, y_l, dy_l,
                                  dhy_l, gateR_l, gateZ_l, gateN_l, Mnh_l, dx_l, dhx_l,
                                  dwx_l, dwh_l, dbx_l, dbh_l, req_data, req_params, req_state);
    // Undo the forward pass's inverted dropout on this layer's input
    // gradient, using the saved per-element mask.
    if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
      dropout_random = dropout_random - T * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * I; i++) {
        if (dropout_random[i] == 0) {
          dx_l[i] = 0;
        } else {
          dx_l[i] = dx_l[i] / (1.0f - dropout);
        }
      }
    }
    if (l > 0) {
      // This layer's input gradient is the next-lower layer's output grad.
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * H * D; ++i) {
        dy_l[i] = dx_l[i];
      }
      // Step every cursor down one layer.
      gateR_l = gateR_l - T * D * N * H;
      gateZ_l = gateZ_l - T * D * N * H;
      gateN_l = gateN_l - T * D * N * H;
      Mnh_l = Mnh_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_l;
      if (l == 1) {
        // Stepping onto layer 0, whose input width is the original I.
        wx_l = wx_l - (inputsize + H) * H * 3 * D;
        wh_l = wx_l + inputsize * 3 * H;
        dwx_l = dwx_l - (inputsize + H) * H * 3 * D;
        dwh_l = dwx_l + inputsize * 3 * H;
      } else {
        wx_l = wx_l - (I + H) * H * 3 * D;
        wh_l = wx_l + I * 3 * H;
        dwx_l = dwx_l - (I + H) * H * 3 * D;
        dwh_l = dwx_l + I * 3 * H;
      }
      dbx_l = dbx_l - D * 3 * H * 2;
      dbh_l = dbx_l + 3 * H;
    }
  }
}
// Inference-time forward pass of one vanilla (Elman) RNN layer on CPU, both
// directions when D == 2. For each step: h_t = act(x_t * wx^T + bx +
// h_{t-1} * wh^T + bh), where act is tanh (mode == 1) or ReLU (otherwise).
// No activations are saved — inference only.
//
// Notable layout details:
//   - y_ptr is [T, N, D * H] with the two directions interleaved per row;
//     the reverse direction writes at column offset +H and walks t backwards.
//   - row t of y holds h_{t+1}... effectively y is used as a ring of hidden
//     states: h_{t-1} is read from the previous row (or from hx at t == 0,
//     which is first copied into row 0 / the last reverse row).
//   - ws is carved into gemmC1 [D, T, N, H] (precomputed x * wx^T for every
//     step and direction) and gemmC2 [N, H] (per-step h_{t-1} * wh^T).
//   - back_bx/back_bh are built from bx_ptr + 2H; when D == 1 those Tensor
//     views may wrap NULL but are never dereferenced.
template<typename DType>
void VanillaRNNForwardInferenceSingleLayer(DType* ws,
                                           DType* tmp_buf,
                                           bool state_outputs,
                                           const int D,
                                           const int T,
                                           const int N,
                                           const int I,
                                           const int H,
                                           const Tensor<cpu, 2, DType> &x,
                                           const Tensor<cpu, 2, DType> &hx,
                                           DType* wx_ptr,
                                           DType* wh_ptr,
                                           DType* bx_ptr,
                                           DType* bh_ptr,
                                           DType* y_ptr,
                                           DType* hy_ptr,
                                           int mode) {
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  // Reverse direction starts at the last timestep, column offset +H.
  DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;              // [D, T, N, H]
  DType* gemmC2 = gemmC1 + D * T * N * H;  // N * H
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_bx_ptr = (bx_ptr != NULL)? bx_ptr + H * 2 : NULL;
  DType* back_bh_ptr = (bh_ptr != NULL)? bh_ptr + H * 2: NULL;
  DType* back_gemmC1 = gemmC1 + T * N * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed the recurrence: copy hx into the rows the first step will read.
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
  // x * wx.T : [T * N, I] * [I, H]
  DType alpha = 1.0;
  DType beta = 0.0;
  // Precompute the input projections for all timesteps in one gemm.
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (int t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Bidirectional: transpose [N, D*H] -> [D, H, N] via tmp_buf so the
      // gemm sees only this direction's slice.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    gemmC1_t = gemmC1 + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int tb = i * H;
        if (mode == 1) {
          ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] +
                                   gemmC2[tb + j] + bh[0][j]);
        } else {
          ht[i * D * H + j] = relu(gemmC1_t[tb + j] + bx[0][j] +
                                   gemmC2[tb + j] + bh[0][j]);
        }
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      // Reverse direction processes step T-1-t, reading the state it wrote
      // one row below (back_ht_1 - H re-bases to column 0 of that row).
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int tb = i * H;
          if (mode == 1) {
            back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
                                          + gemmC2[tb + j] + back_bh[0][j]);
          } else {
            back_ht[i * D * H + j] = relu(gemmC1_t[tb + j] + back_bx[0][j]
                                          + gemmC2[tb + j] + back_bh[0][j]);
          }
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      // Forward direction's last state is in the last row; the reverse
      // direction's last state is in the first row at column offset +H.
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
// Inference driver for the L-layer vanilla RNN stack on CPU. Runs the layers
// bottom-up, ping-ponging each layer's output between y_ptr and a scratch
// buffer in ws so that the last layer always lands in y_ptr: for layer l the
// output goes to y_ptr when (L + l) is odd, to y_tmp otherwise — at
// l == L - 1, (L + l) = 2L - 1 is always odd.
//
// mode selects the activation used by the single-layer kernel
// (1 = tanh, otherwise ReLU). I is mutated to D * H after layer 0.
template <typename DType>
void VanillaRNNForwardInference(DType* ws,
                                bool state_outputs,
                                const int L,
                                const int D,
                                const int T,
                                const int N,
                                int I,
                                const int H,
                                DType* x_ptr,
                                DType* hx_ptr,
                                DType* w_ptr,
                                DType* y_ptr,
                                DType* hy_ptr,
                                int mode) {
  // Packed-weight layout: all wx/wh first, then the bias block.
  DType* wx = w_ptr;
  DType* wh = wx + I * H;
  DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
      + (L - 1) * ((D + 1) * H) * H * D;
  DType* bh = bx + H;
  // Workspace: ping-pong output buffer, then tmp_buf, then the single-layer
  // kernel's own workspace.
  DType* y_tmp = ws;
  DType* y_l = x_ptr;  // layer 0 reads the external input
  DType* tmp_buf = y_tmp + D * T * N * H;
  DType* ws2 = y_tmp + D * T * N * H + D * H * N;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  for (int l = 0; l < L; l++) {
    // x_l views the previous layer's output (or x_ptr for layer 0).
    Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
    if ((L + l) % 2) {
      y_l = y_ptr;
    } else {
      y_l = y_tmp;
    }
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    VanillaRNNForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                                 x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l,
                                                 hy_l, mode);
    // Advance per-layer cursors.
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + H * D * 2;
    bh_l = bh_l + H * D * 2;
    wx_l = wx_l + I * H * D + H * H * D;
    if (l == 0) {
      // Deeper layers consume the previous layer's [*, D * H] output.
      I = D * H;
    }
    wh_l = wx_l + I * H;
  }
}
// Training-time forward pass of one vanilla (Elman) RNN layer on CPU, both
// directions when D == 2. Same recurrence as the inference version
// (h_t = act(x_t * wx^T + bx + h_{t-1} * wh^T + bh)), but additionally saves
// per-step values into gateN for the backward pass. Note the mode-dependent
// meaning of what is saved:
//   - mode == 1 (tanh): gateN stores the activation h_t itself, so backward
//     can use 1 - n^2.
//   - otherwise (ReLU): gateN stores the PRE-activation, so backward can
//     test n > 0.
// Buffer layout matches VanillaRNNForwardInferenceSingleLayer (gemmC1/gemmC2
// in ws, interleaved-direction y, reverse direction at column offset +H
// walking time backwards).
template<typename DType>
void VanillaRNNForwardTrainingSingleLayer(DType* ws,
                                          DType* tmp_buf,
                                          bool state_outputs,
                                          const int D,
                                          const int T,
                                          const int N,
                                          const int I,
                                          const int H,
                                          const Tensor<cpu, 2, DType> &x,
                                          const Tensor<cpu, 2, DType> &hx,
                                          DType* wx_ptr,
                                          DType* wh_ptr,
                                          DType* bx_ptr,
                                          DType* bh_ptr,
                                          DType* gateN,
                                          DType* y_ptr,
                                          DType* hy_ptr,
                                          int mode) {
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;              // [D, T, N, H]
  DType* gemmC2 = gemmC1 + D * T * N * H;  // N * H
  DType* nt = gateN;
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_bx_ptr = (bx_ptr != NULL)? bx_ptr + H * 2 : NULL;
  DType* back_bh_ptr = (bh_ptr != NULL)? bh_ptr + H * 2 : NULL;
  DType* back_gateN = gateN + T * N * H;
  DType* back_gemmC1 = gemmC1 + T * N * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 1, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 1, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed the recurrence with the initial hidden state(s).
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
  // x * wx.T : [T * N, I] * [I, H]
  DType alpha = 1.0;
  DType beta = 0.0;
  // Precompute input projections for every step (and direction) up front.
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (int t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Transpose [N, D*H] -> [D, H, N] to select this direction's slice.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    nt = gateN + t * N * H;
    gemmC1_t = gemmC1 + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int tb = i * H;
        if (mode == 1) {
          // tanh mode: save the activation itself.
          nt[tb + j] = ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] +
                                                gemmC2[tb + j] + bh[0][j]);
        } else {
          // ReLU mode: save the pre-activation for the sign test in backward.
          nt[tb + j] = gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j];
          ht[i * D * H + j] = relu(nt[tb + j]);
        }
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      nt = back_gateN + (T - 1 - t) * N * H;
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int tb = i * H;
          if (mode == 1) {
            nt[tb + j] = back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
                                                       + gemmC2[tb + j] + back_bh[0][j]);
          } else {
            nt[tb + j] = gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j];
            back_ht[i * D * H + j] = relu(nt[tb + j]);
          }
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      // Forward last state = last row; reverse last state = first row, +H.
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
// Forward training pass for the L-layer vanilla RNN stack on CPU. Like
// GruForwardTraining but with a single saved quantity per step (gateN) in
// the reserved space `rs`, laid out as [gateN | y | dropout_random].
// mode selects the activation (1 = tanh, otherwise ReLU); inverted dropout
// is applied to the inputs of layers l > 0 and the mask is saved for the
// backward pass. I is mutated to D * H after layer 0.
template <typename DType>
void VanillaRNNForwardTraining(DType* ws,
                               DType* rs,
                               bool state_outputs,
                               const int L,
                               const int D,
                               const int T,
                               const int N,
                               int I,
                               const int H,
                               DType* x_ptr,
                               DType* hx_ptr,
                               DType* w_ptr,
                               DType* y_ptr,
                               DType* hy_ptr,
                               const float dropout,
                               int mode) {
  // Packed-weight layout: all wx/wh for every layer/direction, then biases.
  DType* wx = w_ptr;
  DType* wh = wx + I * H;
  DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
      + (L - 1) * ((D + 1) * H) * H * D;
  DType* bh = bx + H;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  // Reserved-space blocks, then scratch (tmp_buf / per-layer workspace ws2).
  DType* gateN_l = rs;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* dropout_random = y_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* ws2 = tmp_buf + D * N * H;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  DType* y_tmp = x_ptr;  // layer 0 consumes the external input
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  unsigned int seed_ = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  for (int l = 0; l < L; l++) {
    if (l != 0) {
      y_tmp = y_l;
      y_l = y_l + T * N * H * D;
    }
    if (dropout > 0.0f && l > 0) {
      // NOTE(review): seed_ is shared by all OMP threads; concurrent
      // rand_r(&seed_) calls race on it (mask is still recorded, but the
      // randomness is not reproducible) — same caveat as GruForwardTraining.
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * I; i++) {
        int rand_data = rand_r(&seed_);
        if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
          // Dropped unit: zero input, record 0 in the saved mask.
          dropout_random[(l - 1) * T * N * I + i] = 0;
          y_tmp[i] = 0;
        } else {
          // Kept unit: inverted dropout scaling by 1/(1-p).
          dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
          y_tmp[i] = y_tmp[i] / (1.0f - dropout);
        }
      }
    }
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    VanillaRNNForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                                x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
                                                gateN_l, y_l, hy_l, mode);
    // Advance per-layer cursors.
    gateN_l = gateN_l + T * D * N * H;
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + H * D * 2;
    bh_l = bh_l + H * D * 2;
    wx_l = wx_l + I * H * D + H * H * D;
    if (l == 0) {
      I = D * H;  // deeper layers consume the previous layer's output
    }
    wh_l = wx_l + I * H;
  }
  // y_l points at the top layer's output inside rs; publish it to y_ptr.
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = y_l[i];
  }
}
template <typename DType>
void VanillaRNNBackwardSingleLayer(DType* ws,
DType* tmp_buf,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* gateN,
DType* dx,
DType* dhx,
DType* dwx,
DType* dwh,
DType* dbx,
DType* dbh,
int req_data,
int req_params,
int req_state,
int mode) {
DType* dyt;
DType* ht1; // [N, D, H]
DType* dart;
DType* nt;
DType* dar = ws; // [T, N, H]
DType* dht1 = dar + T * N * H; // [D, N, H]
DType* hx_ = dht1 + D * N * H; // [N, D, H]
DType* back_ht1;
DType* back_dht1 = dht1 + N * H; // [N, H]
DType* back_gateN = gateN + T * N * H;
DType* back_wx_ptr = wx_ptr + I * H + H * H;
DType* back_wh_ptr = wh_ptr + I * H + H * H;
DType* back_dwx = dwx + I * H + H * H;
DType* back_dwh = dwh + I * H + H * H;
DType* back_dbx = dbx + H * 2;
DType* back_dbh = dbh + H * 2;
DType alpha = 1.0;
DType beta = 0.0;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H * H; ++i) {
dwh[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H; ++i) {
dbx[i] = 0;
dbh[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H; ++i) {
if (dhy_ptr) {
dht1[i] = dhy_ptr[i];
} else {
dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + j] = hx[i][j];
}
}
if (D == 2) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H; ++i) {
if (dhy_ptr) {
back_dht1[i] = dhy_ptr[N * H + i];
} else {
back_dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + H + j] = hx[N + i][j];
}
}
}
for (int t = T - 1; t >= 0; --t) {
if (t) {
ht1 = y_ptr + (t - 1) * N * D * H;
} else {
ht1 = hx_;
}
// add dy[T, N, D, H] to dhy[D, N, H]
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
dht1[i * H + j] += dyt[i * D * H + j];
}
}
nt = gateN + t * N * H;
dart = dar + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int id = i * H + j;
if (mode == 1) {
dart[id] = dht1[id] * (1 - nt[id] * nt[id]);
} else {
dart[id] = nt[id] > 0.0f ? static_cast<float>(dht1[id]) : 0.0f;
}
dht1[id] = 0;
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = dart * wh [N, H] = [N, H] * [H, H]
Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [H, I] = [H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
linalg_gemm(d_dart, d_xt, d_dwx, alpha, beta, true, false);
}
// dwh = dart.T * ht1 [H, H] = [H, N] * [N, H]
Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(H, H));
Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, H] = [1, N] * [N, H]
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (int j = 0; j < N * T; ++j) {
dbx[i] += dar[j * H + i];
dbh[i] = dbx[i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (int j = 0; j < N; ++j) {
tmp_dbx[i][t] += dar[t * N * H + j * H + i];
tmp_dbh[i][t] = tmp_dbx[i][t];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
dbx[i] += tmp_dbx[i][t] + dbx[i];
dbh[i] = dbx[i];
}
}
}
}
alpha = 1.0;
beta = 0.0;
// dx = da * wx [T * N, I] = [T * N, H] * [H, I]
Tensor<cpu, 2, DType> d_dar(dar, Shape2(T * N, H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_dar, wx, d_dx, alpha, beta, false, false);
}
// dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
linalg_gemm(d_dar, x, d_dwx, alpha, beta, true, false);
}
if (D == 2) {
for (int t = 0; t < T; ++t) {
if (t == T-1) {
back_ht1 = hx_;
} else {
back_ht1 = y_ptr + (t + 1) * N * D * H;
}
// add dy[T, N, D, H] to dhy[D, N, H]
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
back_dht1[i * H + j] += dyt[i * D * H + H + j];
}
}
nt = back_gateN + t * N * H;
dart = dar + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int id = i * H + j;
if (mode == 1) {
dart[id] = back_dht1[id] * (1 - nt[id] * nt[id]);
} else {
dart[id] = nt[id] > 0.0f ? static_cast<float>(back_dht1[id]) : 0.0f;
}
back_dht1[id] = 0;
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = da * wh [N, H] = [N, H] * [H, H]
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
// dwh = da.T * ht1 [H, H] = [H, N] * [N, H]
Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(H, H));
Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [ H, I] = [H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
linalg_gemm(d_dart, d_xt, d_back_dwx, alpha, beta, true, false);
}
linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, H] = [1, N] * [N, H]
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (int j = 0; j < N * T; ++j) {
back_dbx[i] += dar[j * H + i];
back_dbh[i] = back_dbx[i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (int j = 0; j < N; ++j) {
tmp_dbx[i][t] += dar[t * N * H + j * H + i];
tmp_dbh[i][t] = tmp_dbx[i][t];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
back_dbh[i] = back_dbx[i];
}
}
}
}
alpha = 1.0;
beta = 1.0;
// dxt = da * wx [T * N, I] = [T * N, H] * [H, I]
Tensor<cpu, 2, DType> d_dar2(dar, Shape2(T * N, H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_dar2, back_wx, d_dx, alpha, beta, false, false);
}
alpha = 1.0;
beta = 0.0;
// dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
linalg_gemm(d_dar2, x, d_back_dwx, alpha, beta, true, false);
}
}
if (req_state != kNullOp) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H * D; ++i) {
dhx[i] = dht1[i];
}
}
}
/*
 * Backward pass of a multi-layer (optionally bidirectional, D == 2) vanilla
 * RNN on CPU.  Iterates layers from the top (l = L-1) down to 0, invoking
 * VanillaRNNBackwardSingleLayer for each layer and feeding the computed input
 * gradient of layer l back in as the output gradient of layer l-1.
 *
 * ws/rs       - workspace / reserved space laid out by the forward pass
 *               (gate activations, per-layer outputs, dropout masks, scratch).
 * L/D/T/N/I/H - layers, directions, seq length, batch, input size, hidden size.
 * req_*       - OpRequestType-style flags controlling whether data, parameter
 *               and state gradients are written (kNullOp skips, kAddTo sums).
 * dropout     - dropout probability applied between layers in forward; the
 *               saved masks in `dropout_random` are replayed here.
 * mode        - nonlinearity selector (1 => tanh, else relu), passed through.
 */
template <typename DType>
void VanillaRNNBackward(DType* ws,
                        DType* rs,
                        const int L,
                        const int D,
                        const int T,
                        const int N,
                        int I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* dy_ptr,
                        DType* dhy_ptr,
                        DType* dx_ptr,
                        DType* dhx_ptr,
                        DType* dw_ptr,
                        int req_data,
                        int req_params,
                        int req_state,
                        const float dropout,
                        int mode) {
  /* Weight/gradient pointers for the *top* layer; layers below are reached by
   * walking these pointers backwards at the bottom of the loop.
   * NOTE(review): offsets assume the packed layout produced by the matching
   * forward pass (layer 0 uses I*H input weights, layers >0 use (D*H)*H). */
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  DType* dwh = dwx + I * H;
  DType* dbx = dwh + H * H + (D - 1) * (H * H + I * H)
      + (L - 1) * ((D + 1) * H) * H * D;
  /* Reserved-space regions saved by the forward pass (top layer first). */
  DType* gateN_l = rs + (L - 1) * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* dropout_random = y_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* dx_l = tmp_buf + T * N * D * H + H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * H
      + D * I * H + D * H * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H;
  } else {
    wh_l = wh_l + (D * H) * H;
  }
  DType* dhy_l = NULL;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;
  DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * H
      + D * I * H + D * H * H;
  DType* dwh_l = NULL;
  if (L == 1) {
    dwh_l = dwx_l + I * H;
  } else {
    dwh_l = dwx_l + (D * H) * H;
  }
  DType* dbx_l = dbx + (L - 1) * D * H * 2;
  DType* dbh_l = dbx_l + H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  int inputsize = I;  // remember the real input size; I is reused as D*H for upper layers
  DType* y_tmp = y_l - T * N * H * D;  // output of the layer below == input of layer l
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = L - 1; l >= 0; --l) {
    if (l == 0) {
      /* Bottom layer consumes the network input and writes the user's dx. */
      I = inputsize;
      y_tmp = x_ptr;
      dx_l = dx_ptr;
    } else {
      I = D * H;
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    VanillaRNNBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l,
                                         y_l, dy_l, dhy_l, gateN_l, dx_l, dhx_l, dwx_l, dwh_l,
                                         dbx_l, dbh_l, req_data, req_params, req_state, mode);
    /* Replay the saved dropout mask: zero where the forward pass dropped,
     * rescale by 1/(1-p) elsewhere (inverted dropout). */
    if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
      dropout_random = dropout_random - T * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * I; i++) {
        if (dropout_random[i] == 0) {
          dx_l[i] = 0;
        } else {
          dx_l[i] = dx_l[i] / (1.0f - dropout);
        }
      }
    }
    if (l > 0) {
      /* Input gradient of this layer becomes the output gradient below. */
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * H * D; ++i) {
        dy_l[i] = dx_l[i];
      }
      /* Step all per-layer pointers down one layer. */
      gateN_l = gateN_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_l;
      if (l == 1) {
        /* Layer 0's weight block is sized by the true input size. */
        wx_l = wx_l - (inputsize + H) * H * D;
        wh_l = wx_l + inputsize * H;
        dwx_l = dwx_l - (inputsize + H) * H * D;
        dwh_l = dwx_l + inputsize * H;
      } else {
        wx_l = wx_l - (I + H) * H * D;
        wh_l = wx_l + I * H;
        dwx_l = dwx_l - (I + H) * H * D;
        dwh_l = dwx_l + I * H;
      }
      dbx_l = dbx_l - D * H * 2;
      dbh_l = dbx_l + H;
    }
  }
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_RNN_IMPL_H_
|
libperf.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <ucs/debug/log.h>
#include <ucs/arch/bitops.h>
#include <ucs/sys/module.h>
#include <ucs/sys/string.h>
#include <string.h>
#include <tools/perf/lib/libperf_int.h>
#include <unistd.h>
#if _OPENMP
#include <omp.h>
#endif /* _OPENMP */
/* Resolve atomic support for (_size, _op) via __get_atomic_flag(); on failure
 * logs an error and returns from the *enclosing* function with the status.
 * Intended for use inside functions returning ucs_status_t. */
#define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \
    _status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \
    if (_status != UCS_OK) { \
        ucs_error("%s/%s does not support atomic %s for message size %zu bytes", \
                  (_params)->uct.tl_name, (_params)->uct.dev_name, \
                  (_msg)[_op], (_size)); \
        return _status; \
    }
/* Verify that the iface attribute mask _attr contains every bit in _required;
 * otherwise optionally log the first missing op and return UCS_ERR_UNSUPPORTED
 * from the enclosing function. */
#define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \
    if (!ucs_test_all_flags(_attr, _required)) { \
        if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \
            ucs_error("%s/%s does not support required "#_size"-bit atomic: %s", \
                      (_params)->uct.tl_name, (_params)->uct.dev_name, \
                      (_msg)[ucs_ffs64(~(_attr) & (_required))]); \
        } \
        return UCS_ERR_UNSUPPORTED; \
    }
/* Peer information exchanged over the RTE during endpoint setup.
 * The variable-length address/rkey payload follows this header in the
 * exchange buffer (see uct_perf_test_setup_endpoints()). */
typedef struct {
    union {
        struct {
            size_t     dev_addr_len;    /* uct_device_addr_t size */
            size_t     iface_addr_len;  /* uct_iface_addr_t size */
            size_t     ep_addr_len;     /* uct_ep_addr_t size */
        } uct;
        struct {
            size_t     addr_len;        /* ucp_address_t size */
        } ucp;
    };
    size_t             rkey_size;       /* packed rkey size, 0 if none */
    unsigned long      recv_buffer;     /* remote receive buffer address */
} ucx_perf_ep_info_t;
/* Per-memory-type allocator table; entries registered elsewhere (modules). */
const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_LAST];
/* Human-readable names for UCT iface capability bits, indexed by bit number. */
static const char *perf_iface_ops[] = {
    [ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)]         = "am short",
    [ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)]         = "am bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)]         = "am zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)]        = "put short",
    [ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)]        = "put bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)]        = "put zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)]        = "get short",
    [ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)]        = "get bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)]        = "get zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler",
    [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface",
    [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)]    = "connect to ep",
    [ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)]           = "full reliability",
    [ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)]          = "sync callback",
    [ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)]         = "async callback",
    [ucs_ilog2(UCT_IFACE_FLAG_EVENT_SEND_COMP)]  = "send completion event",
    [ucs_ilog2(UCT_IFACE_FLAG_EVENT_RECV)]       = "tag or active message event",
    [ucs_ilog2(UCT_IFACE_FLAG_EVENT_RECV_SIG)]   = "signaled message event",
    [ucs_ilog2(UCT_IFACE_FLAG_PENDING)]          = "pending",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)]  = "tag eager short",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)]  = "tag eager bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)]  = "tag eager zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)]   = "tag rndv zcopy"
};
/* Names for non-fetching atomic ops, indexed by uct_atomic_op_t. */
static const char *perf_atomic_op[] = {
     [UCT_ATOMIC_OP_ADD]   = "add",
     [UCT_ATOMIC_OP_AND]   = "and",
     [UCT_ATOMIC_OP_OR]    = "or" ,
     [UCT_ATOMIC_OP_XOR]   = "xor"
};
/* Names for fetching atomic ops, indexed by uct_atomic_op_t. */
static const char *perf_atomic_fop[] = {
     [UCT_ATOMIC_OP_ADD]   = "fetch-add",
     [UCT_ATOMIC_OP_AND]   = "fetch-and",
     [UCT_ATOMIC_OP_OR]    = "fetch-or",
     [UCT_ATOMIC_OP_XOR]   = "fetch-xor",
     [UCT_ATOMIC_OP_SWAP]  = "swap",
     [UCT_ATOMIC_OP_CSWAP] = "cswap"
};
/*
 * This Quickselect routine is based on the algorithm described in
 * "Numerical recipes in C", Second Edition,
 * Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5
 * This code by Nicolas Devillard - 1998. Public domain.
 */
/* Return the median of arr[0..n-1] in expected O(n) time.
 * NOTE: partially reorders arr in place (callers here pass the reusable
 * timing queue, where ordering does not matter). */
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
    int low, high ;
    int median;
    int middle, ll, hh;

/* In-place swap of two ucs_time_t values. */
#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }

    low = 0 ; high = n-1 ; median = (low + high) / 2;
    for (;;) {
        if (high <= low) /* One element only */
            return arr[median] ;

        if (high == low + 1) {  /* Two elements only */
            if (arr[low] > arr[high])
                ELEM_SWAP(arr[low], arr[high]) ;
            return arr[median] ;
        }

    /* Find median of low, middle and high items; swap into position low */
    middle = (low + high) / 2;
    if (arr[middle] > arr[high])     ELEM_SWAP(arr[middle], arr[high]) ;
    if (arr[low] > arr[high])        ELEM_SWAP(arr[low], arr[high]) ;
    if (arr[middle] > arr[low])      ELEM_SWAP(arr[middle], arr[low]) ;

    /* Swap low item (now in position middle) into position (low+1) */
    ELEM_SWAP(arr[middle], arr[low+1]) ;

    /* Nibble from each end towards middle, swapping items when stuck */
    ll = low + 1;
    hh = high;
    for (;;) {
        do ll++; while (arr[low] > arr[ll]) ;
        do hh--; while (arr[hh]  > arr[low]) ;

        if (hh < ll)
        break;

        ELEM_SWAP(arr[ll], arr[hh]) ;
    }

    /* Swap middle item (in position low) back into correct position */
    ELEM_SWAP(arr[low], arr[hh]) ;

    /* Re-set active partition: keep only the side containing the median */
    if (hh <= median)
        low = ll;
    if (hh >= median)
        high = hh - 1;
    }
}
/* Allocate `length` bytes of host memory through the UCT interface and fill
 * the caller-owned descriptor *alloc_mem.  Returns UCS_OK on success or the
 * error from uct_iface_mem_alloc() otherwise. */
static ucs_status_t
uct_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
                         unsigned flags, uct_allocated_memory_t *alloc_mem)
{
    ucs_status_t status;

    status = uct_iface_mem_alloc(perf->uct.iface, length,
                                 flags, "perftest", alloc_mem);
    if (status != UCS_OK) {
        /* BUG FIX: previously called ucs_free(alloc_mem) here.  alloc_mem is
         * a pointer to a caller-owned structure (callers pass
         * &perf->uct.send_mem / &perf->uct.recv_mem), not heap memory, so
         * freeing it was invalid.  Nothing needs releasing on this path. */
        ucs_error("failed to allocate memory: %s", ucs_status_string(status));
        return status;
    }

    ucs_assert(alloc_mem->md == perf->uct.md);

    return UCS_OK;
}
/* Release memory previously obtained via uct_perf_test_alloc_host().
 * The perf argument is unused; it matches the allocator callback signature. */
static void uct_perf_test_free_host(const ucx_perf_context_t *perf,
                                    uct_allocated_memory_t *alloc_mem)
{
    uct_iface_mem_free(alloc_mem);
}
/* Host-to-host copy callback: both buffers must reside in host memory;
 * anything else is reported as an error and no copy is performed. */
static void ucx_perf_test_memcpy_host(void *dst, ucs_memory_type_t dst_mem_type,
                                      const void *src, ucs_memory_type_t src_mem_type,
                                      size_t count)
{
    if ((dst_mem_type == UCS_MEMORY_TYPE_HOST) &&
        (src_mem_type == UCS_MEMORY_TYPE_HOST)) {
        memcpy(dst, src, count);
        return;
    }

    ucs_error("wrong memory type passed src - %d, dst - %d",
              src_mem_type, dst_mem_type);
}
/* Allocate the send/recv buffers (one slot per thread) and the IOV descriptor
 * array for a UCT test run.  On failure, everything allocated so far is
 * released via the goto cleanup chain. */
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    ucs_status_t status;
    unsigned flags;
    size_t buffer_size;

    /* With a fixed IOV stride, the buffer must cover iovcnt * stride bytes;
     * otherwise the total configured message size suffices. */
    if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* TODO use params->alignment */

    flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
            UCT_MD_MEM_FLAG_NONBLOCK : 0;
    flags |= UCT_MD_MEM_ACCESS_ALL;

    /* Allocate send buffer memory */
    status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
                                        flags, &perf->uct.send_mem);
    if (status != UCS_OK) {
        goto err;
    }
    perf->send_buffer = perf->uct.send_mem.address;

    /* Allocate receive buffer memory */
    status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
                                        flags, &perf->uct.recv_mem);
    if (status != UCS_OK) {
        goto err_free_send;
    }
    perf->recv_buffer = perf->uct.recv_mem.address;

    /* Allocate IOV datatype memory */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->uct.iov             = malloc(sizeof(*perf->uct.iov) *
                                       perf->params.msg_size_cnt *
                                       params->thread_count);
    if (NULL == perf->uct.iov) {
        status = UCS_ERR_NO_MEMORY;
        ucs_error("Failed allocate send IOV(%lu) buffer: %s",
                  perf->params.msg_size_cnt, ucs_status_string(status));
        goto err_free_recv;
    }

    perf->offset = 0;

    ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
              perf->send_buffer, perf->recv_buffer);
    return UCS_OK;

err_free_recv:
    perf->allocator->uct_free(perf, &perf->uct.recv_mem);
err_free_send:
    perf->allocator->uct_free(perf, &perf->uct.send_mem);
err:
    return status;
}
/* Release everything obtained by uct_perf_test_alloc_mem(): the IOV array
 * plus both UCT-allocated buffers (independent resources, any order). */
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
    free(perf->uct.iov);
    perf->allocator->uct_free(perf, &perf->uct.recv_mem);
    perf->allocator->uct_free(perf, &perf->uct.send_mem);
}
/* Record the run's time origin and reset the per-report snapshot so the
 * first progress report measures from "now". */
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
    ucs_time_t now = ucs_get_time();

    perf->start_time_acc = ucs_get_accurate_time();

    /* max_time == 0 means "no time limit" */
    if (perf->params.max_time == 0.0) {
        perf->end_time = UINT64_MAX;
    } else {
        perf->end_time = ucs_time_from_sec(perf->params.max_time) + now;
    }

    perf->prev_time        = now;
    perf->prev.time        = now;
    perf->prev.time_acc    = perf->start_time_acc;
    perf->current.time_acc = perf->start_time_acc;
}
/* Initialize/reset all parameters that could be modified by the warm-up run */
static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
                                          ucx_perf_params_t *params)
{
    unsigned idx;

    /* max_iter == 0 means "unlimited iterations" */
    if (perf->params.max_iter == 0) {
        perf->max_iter = UINT64_MAX;
    } else {
        perf->max_iter = perf->params.max_iter;
    }
    perf->report_interval = ucs_time_from_sec(perf->params.report_interval);

    /* Zero all running counters accumulated by a previous run */
    perf->current.time  = 0;
    perf->current.msgs  = 0;
    perf->current.bytes = 0;
    perf->current.iters = 0;
    perf->prev.msgs     = 0;
    perf->prev.bytes    = 0;
    perf->prev.iters    = 0;

    /* Clear the latency sample ring buffer */
    perf->timing_queue_head = 0;
    for (idx = 0; idx < TIMING_QUEUE_SIZE; ++idx) {
        perf->timing_queue[idx] = 0;
    }

    ucx_perf_test_start_clock(perf);
}
/* One-time context initialization: snapshot the user parameters, pick the
 * allocator matching the requested memory type, and reset run state. */
static void ucx_perf_test_init(ucx_perf_context_t *perf,
                               ucx_perf_params_t *params)
{
    perf->params = *params;
    perf->offset = 0;
    /* NOTE(review): assumes an allocator was registered for this mem_type;
     * a missing module would leave this NULL - verify callers check it. */
    perf->allocator = ucx_perf_mem_type_allocators[params->mem_type];

    ucx_perf_test_prepare_new_run(perf, params);
}
/* Compute the final test results (latency, bandwidth, message rate) from the
 * accumulated counters.  "moment" averages cover the interval since the last
 * report snapshot (perf->prev); "total" averages cover the whole run. */
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
    ucs_time_t median;
    double factor;

    /* Ping-pong measures a round trip: halve latencies, double rates. */
    if (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) {
        factor = 2.0;
    } else {
        factor = 1.0;
    }

    result->iters = perf->current.iters;
    result->bytes = perf->current.bytes;
    result->elapsed_time = perf->current.time_acc - perf->start_time_acc;

    /* Latency */
    /* Typical latency = median of the recent per-iteration samples. */
    median = __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE);
    result->latency.typical = ucs_time_to_sec(median) / factor;

    result->latency.moment_average =
        (perf->current.time_acc - perf->prev.time_acc)
        / (perf->current.iters - perf->prev.iters)
        / factor;

    result->latency.total_average =
        (perf->current.time_acc - perf->start_time_acc)
        / perf->current.iters
        / factor;

    /* Bandwidth */

    result->bandwidth.typical = 0.0; // Undefined

    result->bandwidth.moment_average =
        (perf->current.bytes - perf->prev.bytes) /
        (perf->current.time_acc - perf->prev.time_acc) * factor;

    result->bandwidth.total_average =
        perf->current.bytes /
        (perf->current.time_acc - perf->start_time_acc) * factor;

    /* Packet rate */

    result->msgrate.typical = 0.0; // Undefined

    result->msgrate.moment_average =
        (perf->current.msgs - perf->prev.msgs) /
        (perf->current.time_acc - perf->prev.time_acc) * factor;

    result->msgrate.total_average =
        perf->current.msgs /
        (perf->current.time_acc - perf->start_time_acc) * factor;
}
/* Validate generic (API-independent) test parameters: minimum message size,
 * outstanding-operation count, and per-entry fit within the IOV stride.
 * Returns UCS_ERR_INVALID_PARAM on the first violation. */
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
    size_t it;

    /* check if zero-size messages are requested and supported */
    if ((/* they are not supported by: */
         /* - UCT tests, except UCT AM Short/Bcopy */
         (params->api == UCX_PERF_API_UCT) ||
         (/* - UCP RMA and AMO tests */
          (params->api == UCX_PERF_API_UCP) &&
          (params->command != UCX_PERF_CMD_AM) &&
          (params->command != UCX_PERF_CMD_TAG) &&
          (params->command != UCX_PERF_CMD_TAG_SYNC) &&
          (params->command != UCX_PERF_CMD_STREAM))) &&
        ucx_perf_get_message_size(params) < 1)
    {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too small, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    if (params->max_outstanding < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("max_outstanding, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* check if particular message size fit into stride size */
    if (params->iov_stride) {
        for (it = 0; it < params->msg_size_cnt; ++it) {
            if (params->msg_size_list[it] > params->iov_stride) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("Buffer size %lu bigger than stride %lu",
                              params->msg_size_list[it], params->iov_stride);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }

    return UCS_OK;
}
/* Blocking interface flush: keep issuing uct_iface_flush() and progressing
 * the worker (once per attempt, even after success, as the original did)
 * until the flush no longer reports UCS_INPROGRESS. */
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
    ucs_status_t status;

    for (;;) {
        status = uct_iface_flush(perf->uct.iface, 0, NULL);
        uct_worker_progress(perf->uct.worker);
        if (status != UCS_INPROGRESS) {
            break;
        }
    }
}
/* Map a UCT data layout to the matching capability flag; unknown layouts
 * yield 0 (i.e. "no capability required"). */
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
                                  uint64_t bcopy_f, uint64_t zcopy_f)
{
    switch (layout) {
    case UCT_PERF_DATA_LAYOUT_SHORT:
        return short_f;
    case UCT_PERF_DATA_LAYOUT_BCOPY:
        return bcopy_f;
    case UCT_PERF_DATA_LAYOUT_ZCOPY:
        return zcopy_f;
    default:
        return 0;
    }
}
/* Record the requested atomic op in the 32- or 64-bit requirement mask,
 * selected by operand size; other sizes are unsupported. */
static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32,
                                             uint64_t *op64, uint64_t op)
{
    if (size == sizeof(uint32_t)) {
        *op32 = UCS_BIT(op);
    } else if (size == sizeof(uint64_t)) {
        *op64 = UCS_BIT(op);
    } else {
        return UCS_ERR_UNSUPPORTED;
    }
    return UCS_OK;
}
/* Map a UCT data layout to the matching size limit; unknown layouts yield 0.
 * FIX: zcopy_m was inconsistently declared uint64_t while its siblings (and
 * every call site, which passes size_t iface caps) use size_t; normalized to
 * size_t to match __get_flag's parallel signature and the return type. */
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
                                    size_t bcopy_m, size_t zcopy_m)
{
    return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m :
           (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m :
           (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m :
           0;
}
/* Verify that the selected UCT interface/MD can execute the requested test:
 * capability flags for the command+layout, atomic-op support, message-size
 * limits, AM header constraints, IOV-count limits, and memory-type support.
 * Note: the ATOMIC_OP_* macros may return early from this function. */
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
                                                     uct_iface_h iface, uct_md_h md)
{
    uint64_t required_flags = 0;
    uint64_t atomic_op32    = 0;
    uint64_t atomic_op64    = 0;
    uint64_t atomic_fop32   = 0;
    uint64_t atomic_fop64   = 0;
    uct_md_attr_t md_attr;
    uct_iface_attr_t attr;
    ucs_status_t status;
    size_t min_size, max_size, max_iov, message_size;

    status = uct_md_query(md, &md_attr);
    if (status != UCS_OK) {
        ucs_error("uct_md_query(%s) failed: %s",
                  params->uct.md_name, ucs_status_string(status));
        return status;
    }

    status = uct_iface_query(iface, &attr);
    if (status != UCS_OK) {
        ucs_error("uct_iface_query(%s/%s) failed: %s",
                  params->uct.tl_name, params->uct.dev_name,
                  ucs_status_string(status));
        return status;
    }

    min_size     = 0;
    max_iov      = 1;
    message_size = ucx_perf_get_message_size(params);
    /* Each arm sets max_size (and, for data ops, required_flags/min_size);
     * atomic arms instead populate the atomic_* requirement masks. */
    switch (params->command) {
    case UCX_PERF_CMD_AM:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
                                    UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
        required_flags |= UCT_IFACE_FLAG_CB_SYNC;
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.am.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
                                  attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
        max_iov  = attr.cap.am.max_iov;
        break;
    case UCX_PERF_CMD_PUT:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
                                    UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.put.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
                                  attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
        max_iov  = attr.cap.put.max_iov;
        break;
    case UCX_PERF_CMD_GET:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT,
                                    UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.get.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short,
                                  attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
        max_iov  = attr.cap.get.max_iov;
        break;
    case UCX_PERF_CMD_ADD:
        ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD,
                         perf_atomic_op, params, status);
        max_size = 8;
        break;
    case UCX_PERF_CMD_FADD:
        ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD,
                         perf_atomic_fop, params, status);
        max_size = 8;
        break;
    case UCX_PERF_CMD_SWAP:
        ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP,
                         perf_atomic_fop, params, status);
        max_size = 8;
        break;
    case UCX_PERF_CMD_CSWAP:
        ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP,
                         perf_atomic_fop, params, status);
        max_size = 8;
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    /* check atomics first */
    ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op);
    ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op);
    ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop);
    ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop);

    /* check iface flags */
    if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) &&
        (!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags)) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("%s/%s does not support operation %s",
                      params->uct.tl_name, params->uct.dev_name,
                      perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]);
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size < min_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size (%zu) is smaller than min supported (%zu)",
                      message_size, min_size);
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size > max_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size (%zu) is larger than max supported (%zu)",
                      message_size, max_size);
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (params->command == UCX_PERF_CMD_AM) {
        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
            (params->am_hdr_size != sizeof(uint64_t)))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Short AM header size must be 8 bytes");
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
            (params->am_hdr_size > attr.cap.am.max_hdr))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size (%zu) is larger than max supported (%zu)",
                          params->am_hdr_size, attr.cap.am.max_hdr);
            }
            return UCS_ERR_UNSUPPORTED;
        }

        if (params->am_hdr_size > message_size) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size (%zu) is larger than message size (%zu)",
                          params->am_hdr_size, message_size);
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM flow-control window (%d) too large (should be <= %d)",
                          params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW);
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
            (params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
        {
            ucs_warn("Running active-message test with on-sided progress");
        }
    }

    if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) {
        if (params->msg_size_cnt > max_iov) {
            if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
                !params->msg_size_cnt) {
                ucs_error("Wrong number of IOV entries. Requested is %lu, "
                          "should be in the range 1...%lu", params->msg_size_cnt,
                          max_iov);
            }
            return UCS_ERR_UNSUPPORTED;
        }
        /* if msg_size_cnt == 1 the message size checked above */
        if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
            if (params->am_hdr_size > params->msg_size_list[0]) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("AM header size (%lu) larger than the first IOV "
                              "message size (%lu)", params->am_hdr_size,
                              params->msg_size_list[0]);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }

    /* The MD must either access this memory type natively or register it. */
    if (!(md_attr.cap.access_mem_type == params->mem_type) &&
        !(md_attr.cap.reg_mem_types & UCS_BIT(params->mem_type))) {
        ucs_error("Unsupported memory type %s by %s/%s",
                  ucs_memory_type_names[params->mem_type],
                  params->uct.tl_name, params->uct.dev_name);
        return UCS_ERR_INVALID_PARAM;
    }

    return UCS_OK;
}
/* Exchange addresses/rkeys with all peers via the RTE and connect an endpoint
 * to each of them.  The exchange buffer holds: [packed rkey][device addr]
 * [iface addr][ep addr], preceded on the wire by ucx_perf_ep_info_t.
 * On any failure all endpoints/rkeys created so far are destroyed. */
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
    const size_t buffer_size = 2048;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    uct_device_addr_t *dev_addr;
    uct_iface_addr_t *iface_addr;
    uct_ep_addr_t *ep_addr;
    uct_iface_attr_t iface_attr;
    uct_md_attr_t md_attr;
    uct_ep_params_t ep_params;
    void *rkey_buffer;
    ucs_status_t status;
    struct iovec vec[5];
    void *buffer;
    void *req;

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    status = uct_iface_query(perf->uct.iface, &iface_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
        goto err_free;
    }

    status = uct_md_query(perf->uct.md, &md_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
        goto err_free;
    }

    if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
        info.rkey_size = md_attr.rkey_packed_size;
    } else {
        info.rkey_size = 0;
    }
    info.uct.dev_addr_len   = iface_attr.device_addr_len;
    info.uct.iface_addr_len = iface_attr.iface_addr_len;
    info.uct.ep_addr_len    = iface_attr.ep_addr_len;
    info.recv_buffer        = (uintptr_t)perf->recv_buffer;

    /* Carve the exchange buffer into its consecutive sections */
    rkey_buffer = buffer;
    dev_addr    = UCS_PTR_BYTE_OFFSET(rkey_buffer, info.rkey_size);
    iface_addr  = UCS_PTR_BYTE_OFFSET(dev_addr, info.uct.dev_addr_len);
    ep_addr     = UCS_PTR_BYTE_OFFSET(iface_addr, info.uct.iface_addr_len);
    ucs_assert_always(UCS_PTR_BYTE_OFFSET(ep_addr, info.uct.ep_addr_len) <=
                      UCS_PTR_BYTE_OFFSET(buffer, buffer_size));

    status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_device_address: %s",
                  ucs_status_string(status));
        goto err_free;
    }

    status = uct_iface_get_address(perf->uct.iface, iface_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
        goto err_free;
    }

    if (info.rkey_size > 0) {
        memset(rkey_buffer, 0, info.rkey_size);
        status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
        if (status != UCS_OK) {
            ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
            goto err_free;
        }
    }

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
    if (perf->uct.peers == NULL) {
        /* BUG FIX: previously jumped to err_free without setting status,
         * returning the stale UCS_OK from the preceding successful call. */
        status = UCS_ERR_NO_MEMORY;
        goto err_free;
    }

    ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE;
    ep_params.iface      = perf->uct.iface;
    if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
        /* Pre-create endpoints so their addresses can be exchanged */
        for (i = 0; i < group_size; ++i) {
            if (i == group_index) {
                continue;
            }

            status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
            status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        }
    } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
        ep_params.field_mask |= UCT_EP_PARAM_FIELD_DEV_ADDR |
                                UCT_EP_PARAM_FIELD_IFACE_ADDR;
    }

    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = buffer;
    vec[1].iov_len  = info.rkey_size + info.uct.dev_addr_len +
                      info.uct.iface_addr_len + info.uct.ep_addr_len;

    rte_call(perf, post_vec, vec, 2, &req);
    rte_call(perf, exchange_vec, req);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        remote_info = buffer;
        rkey_buffer = remote_info + 1;
        dev_addr    = UCS_PTR_BYTE_OFFSET(rkey_buffer, remote_info->rkey_size);
        iface_addr  = UCS_PTR_BYTE_OFFSET(dev_addr, remote_info->uct.dev_addr_len);
        ep_addr     = UCS_PTR_BYTE_OFFSET(iface_addr, remote_info->uct.iface_addr_len);
        perf->uct.peers[i].remote_addr = remote_info->recv_buffer;

        if (!uct_iface_is_reachable(perf->uct.iface, dev_addr,
                                    remote_info->uct.iface_addr_len ?
                                    iface_addr : NULL)) {
            ucs_error("Destination is unreachable");
            status = UCS_ERR_UNREACHABLE;
            goto err_destroy_eps;
        }

        if (remote_info->rkey_size > 0) {
            status = uct_rkey_unpack(perf->uct.cmpt, rkey_buffer,
                                     &perf->uct.peers[i].rkey);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        } else {
            perf->uct.peers[i].rkey.handle = NULL;
            perf->uct.peers[i].rkey.rkey   = UCT_INVALID_RKEY;
        }

        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
            status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
        } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
            ep_params.dev_addr   = dev_addr;
            ep_params.iface_addr = iface_addr;
            status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
        } else {
            status = UCS_ERR_UNSUPPORTED;
        }
        if (status != UCS_OK) {
            ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
            goto err_destroy_eps;
        }
    }
    uct_perf_iface_flush_b(perf);

    free(buffer);

    uct_perf_barrier(perf);
    return UCS_OK;

err_destroy_eps:
    for (i = 0; i < group_size; ++i) {
        if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
            uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep != NULL) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }
    free(perf->uct.peers);
err_free:
    free(buffer);
err:
    return status;
}
/* Tear down everything created by uct_perf_test_setup_endpoints(): barrier
 * with peers, unset the AM handler, then release each peer's rkey/endpoint
 * and the peers array itself. */
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned group_size, group_index, i;

    uct_perf_barrier(perf);

    uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0);

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue; /* no self-connection to clean up */
        }
        if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
            uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }
    free(perf->uct.peers);
}
/* Translate the perf-test command into the UCP feature flags (and request
 * size) required to run it. Returns UCS_ERR_INVALID_PARAM for unknown
 * commands or unsupported atomic sizes; otherwise forwards the result of
 * ucx_perf_test_check_params(). */
static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params,
                                              ucp_params_t *ucp_params)
{
    const size_t message_size = ucx_perf_get_message_size(params);

    switch (params->command) {
    case UCX_PERF_CMD_PUT:
    case UCX_PERF_CMD_GET:
        ucp_params->features |= UCP_FEATURE_RMA;
        break;
    case UCX_PERF_CMD_ADD:
    case UCX_PERF_CMD_FADD:
    case UCX_PERF_CMD_SWAP:
    case UCX_PERF_CMD_CSWAP:
        /* Atomics are only defined for 32- and 64-bit operands */
        if (message_size == sizeof(uint32_t)) {
            ucp_params->features |= UCP_FEATURE_AMO32;
        } else if (message_size == sizeof(uint64_t)) {
            ucp_params->features |= UCP_FEATURE_AMO64;
        } else {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Atomic size should be either 32 or 64 bit");
            }
            return UCS_ERR_INVALID_PARAM;
        }
        break;
    case UCX_PERF_CMD_TAG:
    case UCX_PERF_CMD_TAG_SYNC:
        /* Tag tests allocate their request state inside UCP requests */
        ucp_params->features    |= UCP_FEATURE_TAG;
        ucp_params->field_mask  |= UCP_PARAM_FIELD_REQUEST_SIZE;
        ucp_params->request_size = sizeof(ucp_perf_request_t);
        break;
    case UCX_PERF_CMD_STREAM:
        ucp_params->features    |= UCP_FEATURE_STREAM;
        ucp_params->field_mask  |= UCP_PARAM_FIELD_REQUEST_SIZE;
        ucp_params->request_size = sizeof(ucp_perf_request_t);
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* Remaining validation is shared with the UCT flavor of the test */
    return ucx_perf_test_check_params(params);
}
/* Allocate the IOV array used by UCP IOV datatypes: 'iovcnt' entries per
 * thread. No-op for non-IOV datatypes (*iov_p is left untouched).
 * Returns UCS_ERR_NO_MEMORY if the allocation fails. */
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
                                                size_t iovcnt, unsigned thread_count,
                                                ucp_dt_iov_t **iov_p)
{
    ucp_dt_iov_t *iov;

    if (UCP_PERF_DATATYPE_IOV == datatype) {
        iov = malloc(sizeof(*iov) * iovcnt * thread_count);
        if (NULL == iov) {
            /* Fix: iovcnt is size_t, so use %zu — %lu is undefined behavior
             * on platforms where size_t is not unsigned long (e.g. LLP64). */
            ucs_error("Failed allocate IOV buffer with iovcnt=%zu", iovcnt);
            return UCS_ERR_NO_MEMORY;
        }
        *iov_p = iov;
    }

    return UCS_OK;
}
/* Allocate 'length' bytes of host memory through ucp_mem_map(ALLOCATE) and
 * return the mapped address in *address_p and the handle in *memh.
 * 'non_blk_flag' is OR-ed into the mapping flags when the test requested
 * non-blocking mapping. On failure nothing is left mapped. */
static ucs_status_t
ucp_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
                         void **address_p, ucp_mem_h *memh, int non_blk_flag)
{
    ucp_mem_map_params_t mem_map_params;
    ucp_mem_attr_t mem_attr;
    ucs_status_t status;

    mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                                UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                                UCP_MEM_MAP_PARAM_FIELD_FLAGS;
    mem_map_params.address    = *address_p;
    mem_map_params.length     = length;
    mem_map_params.flags      = UCP_MEM_MAP_ALLOCATE;
    if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) {
        mem_map_params.flags |= non_blk_flag;
    }

    status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh);
    if (status != UCS_OK) {
        goto err;
    }

    /* Query the address actually chosen by the allocation */
    mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
    status = ucp_mem_query(*memh, &mem_attr);
    if (status != UCS_OK) {
        /* Fix: the original leaked the mapped region on query failure */
        goto err_unmap;
    }

    *address_p = mem_attr.address;
    return UCS_OK;

err_unmap:
    ucp_mem_unmap(perf->ucp.context, *memh);
err:
    return status;
}
/* Release host memory previously obtained from ucp_perf_test_alloc_host().
 * 'address' is unused - the mapping is identified by 'memh' alone. */
static void ucp_perf_test_free_host(const ucx_perf_context_t *perf,
                                    void *address, ucp_mem_h memh)
{
    ucs_status_t unmap_status = ucp_mem_unmap(perf->ucp.context, memh);

    if (unmap_status != UCS_OK) {
        ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(unmap_status));
    }
}
/* Allocate all UCP test buffers: send/recv data buffers (one slot per
 * thread) plus the optional IOV arrays for IOV datatypes. On failure every
 * buffer allocated so far is released and the failing status is returned. */
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    ucs_status_t status;
    size_t buffer_size;

    /* With a stride, the buffer must cover all strided IOV entries */
    if (params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* Allocate send buffer memory */
    perf->send_buffer = NULL;
    status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
                                        &perf->send_buffer, &perf->ucp.send_memh,
                                        UCP_MEM_MAP_NONBLOCK);
    if (status != UCS_OK) {
        goto err;
    }

    /* Allocate receive buffer memory */
    perf->recv_buffer = NULL;
    status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
                                        &perf->recv_buffer, &perf->ucp.recv_memh,
                                        0);
    if (status != UCS_OK) {
        goto err_free_send_buffer;
    }

    /* Allocate IOV datatype memory */
    perf->ucp.send_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype,
                                         perf->params.msg_size_cnt,
                                         params->thread_count,
                                         &perf->ucp.send_iov);
    if (UCS_OK != status) {
        goto err_free_buffers;
    }

    perf->ucp.recv_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype,
                                         perf->params.msg_size_cnt,
                                         params->thread_count,
                                         &perf->ucp.recv_iov);
    if (UCS_OK != status) {
        goto err_free_send_iov_buffers;
    }

    return UCS_OK;

err_free_send_iov_buffers:
    free(perf->ucp.send_iov);
err_free_buffers:
    perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
err_free_send_buffer:
    perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
err:
    /* Fix: propagate the actual failure status instead of always reporting
     * UCS_ERR_NO_MEMORY (every goto above is taken with status != UCS_OK). */
    return status;
}
/* Release everything allocated by ucp_perf_test_alloc_mem(), in reverse
 * allocation order. free(NULL) is a no-op, so it is safe when the IOV
 * arrays were never allocated (non-IOV datatypes). */
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
    free(perf->ucp.recv_iov);
    free(perf->ucp.send_iov);
    perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
    perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
}
/* Destroy all per-peer UCP resources: rkeys, then endpoints. Disconnects are
 * issued non-blocking for all peers first and completed afterwards, which
 * overlaps the teardown of all connections. Finally the peers array itself
 * is freed. */
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf,
                                      unsigned group_size)
{
    ucs_status_ptr_t *reqs;
    ucp_tag_recv_info_t info;
    ucs_status_t status;
    void *req;
    unsigned i;

    /* Fix: calloc takes (count, size) - original had them swapped (harmless,
     * but confusing) and never checked the result, dereferencing NULL below
     * on allocation failure. */
    reqs = calloc(group_size, sizeof(*reqs));

    for (i = 0; i < group_size; ++i) {
        if (perf->ucp.peers[i].rkey != NULL) {
            ucp_rkey_destroy(perf->ucp.peers[i].rkey);
        }
        if (perf->ucp.peers[i].ep == NULL) {
            continue;
        }

        req = ucp_disconnect_nb(perf->ucp.peers[i].ep);
        if (reqs != NULL) {
            reqs[i] = req; /* complete later, overlapping all disconnects */
        } else if (UCS_PTR_IS_PTR(req)) {
            /* No request array available - wait for this disconnect inline */
            do {
                ucp_worker_progress(perf->ucp.worker);
                status = ucp_request_test(req, &info);
            } while (status == UCS_INPROGRESS);
            ucp_request_release(req);
        }
    }

    if (reqs != NULL) {
        for (i = 0; i < group_size; ++i) {
            if (!UCS_PTR_IS_PTR(reqs[i])) {
                continue; /* completed immediately or no endpoint */
            }
            do {
                ucp_worker_progress(perf->ucp.worker);
                status = ucp_request_test(reqs[i], &info);
            } while (status == UCS_INPROGRESS);
            ucp_request_release(reqs[i]);
        }
        free(reqs);
    }

    free(perf->ucp.peers);
}
/* All-reduce of a status code over the RTE: every rank broadcasts its local
 * status and receives everyone else's; any non-OK status wins over UCS_OK.
 * Returns the collective (worst) status. */
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
                                                  ucs_status_t status)
{
    const unsigned num_peers       = rte_call(perf, group_size);
    ucs_status_t collective_status = status;
    struct iovec vec;
    void *req = NULL;
    unsigned peer;

    /* Broadcast our local status to every rank */
    vec.iov_base = &status;
    vec.iov_len  = sizeof(status);
    rte_call(perf, post_vec, &vec, 1, &req);
    rte_call(perf, exchange_vec, req);

    /* Collect all remote statuses; keep the last failure seen */
    for (peer = 0; peer < num_peers; ++peer) {
        rte_call(perf, recv, peer, &status, sizeof(status), req);
        if (status != UCS_OK) {
            collective_status = status;
        }
    }

    return collective_status;
}
/* Exchange worker addresses (and, for RMA/AMO tests, packed rkeys) with all
 * ranks over the RTE, then create a UCP endpoint and unpacked rkey for every
 * remote rank. On success the worker is flushed to force wireup completion.
 * On failure all partially-created endpoints are destroyed and the failure
 * status is propagated to all ranks through a status exchange. */
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    const size_t buffer_size = 2048;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    ucp_address_t *address;
    size_t address_length = 0;
    ucp_ep_params_t ep_params;
    ucs_status_t status;
    struct iovec vec[3];
    void *rkey_buffer;
    void *req = NULL;
    void *buffer;

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    status = ucp_worker_get_address(perf->ucp.worker, &address, &address_length);
    if (status != UCS_OK) {
        if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("ucp_worker_get_address() failed: %s", ucs_status_string(status));
        }
        goto err;
    }

    info.ucp.addr_len = address_length;
    info.recv_buffer  = (uintptr_t)perf->recv_buffer;

    /* vec[0]=ep info, vec[1]=worker address, vec[2]=packed rkey (optional) */
    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = address;
    vec[1].iov_len  = address_length;

    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        /* Peers need an rkey for our receive buffer to target it remotely */
        status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
                               &rkey_buffer, &info.rkey_size);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
            }
            ucp_worker_release_address(perf->ucp.worker, address);
            goto err;
        }

        vec[2].iov_base = rkey_buffer;
        vec[2].iov_len  = info.rkey_size;
        rte_call(perf, post_vec, vec, 3, &req);
        ucp_rkey_buffer_release(rkey_buffer);
    } else {
        info.rkey_size = 0;
        rte_call(perf, post_vec, vec, 2, &req);
    }

    ucp_worker_release_address(perf->ucp.worker, address);
    rte_call(perf, exchange_vec, req);

    perf->ucp.peers = calloc(group_size, sizeof(*perf->ucp.peers));
    if (perf->ucp.peers == NULL) {
        /* Fix: the original fell through with status == UCS_OK, so an OOM
         * here was silently reported as success to the caller. */
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE receive buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err_destroy_eps;
    }

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue; /* no endpoint to ourselves */
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        /* Wire format: [ucx_perf_ep_info_t][worker address][packed rkey] */
        remote_info = buffer;
        address     = (ucp_address_t*)(remote_info + 1);
        rkey_buffer = UCS_PTR_BYTE_OFFSET(address, remote_info->ucp.addr_len);
        perf->ucp.peers[i].remote_addr = remote_info->recv_buffer;

        ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
        ep_params.address    = address;

        status = ucp_ep_create(perf->ucp.worker, &ep_params, &perf->ucp.peers[i].ep);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
            }
            goto err_free_buffer;
        }

        if (remote_info->rkey_size > 0) {
            status = ucp_ep_rkey_unpack(perf->ucp.peers[i].ep, rkey_buffer,
                                        &perf->ucp.peers[i].rkey);
            if (status != UCS_OK) {
                if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
                }
                goto err_free_buffer;
            }
        } else {
            perf->ucp.peers[i].rkey = NULL;
        }
    }

    free(buffer);

    status = ucp_perf_test_exchange_status(perf, UCS_OK);
    if (status != UCS_OK) {
        ucp_perf_test_destroy_eps(perf, group_size);
        /* Fix: return the collective failure immediately - the original fell
         * through and let ucp_worker_flush() overwrite 'status', so a remote
         * rank's failure could be reported as success. */
        return status;
    }

    /* force wireup completion */
    status = ucp_worker_flush(perf->ucp.worker);
    if (status != UCS_OK) {
        ucs_warn("ucp_worker_flush() failed: %s", ucs_status_string(status));
    }

    return status;

err_free_buffer:
    free(buffer);
err_destroy_eps:
    ucp_perf_test_destroy_eps(perf, group_size);
err:
    /* Make sure all ranks learn about the failure */
    (void)ucp_perf_test_exchange_status(perf, status);
    return status;
}
/* Barrier with all ranks, then destroy every UCP endpoint/rkey. The barrier
 * guarantees no rank tears down connections that are still in use. */
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned num_peers;

    ucp_perf_barrier(perf);
    num_peers = rte_call(perf, group_size);
    ucp_perf_test_destroy_eps(perf, num_peers);
}
/* Configure 'perf' for a warmup pass: cap the iteration count at warmup_iter,
 * but never more than a tenth (rounded up) of the measured run, and suppress
 * periodic reporting by pushing the report interval to ULONG_MAX. */
static void ucx_perf_set_warmup(ucx_perf_context_t* perf, ucx_perf_params_t* params)
{
    perf->max_iter = ucs_min(params->warmup_iter, ucs_div_round_up(params->max_iter, 10));
    perf->report_interval = ULONG_MAX;
}
/* Find the UCT component and memory domain that provide the transport
 * (params->uct.tl_name) on the device (params->uct.dev_name) requested by
 * the test, open that MD and store the handles in perf->uct.{cmpt,md}.
 * Returns UCS_ERR_NO_DEVICE if no component offers the tl/device pair. */
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
    uct_component_h *uct_components;
    uct_component_attr_t component_attr;
    uct_tl_resource_desc_t *tl_resources;
    unsigned md_index, num_components;
    unsigned tl_index, num_tl_resources;
    unsigned cmpt_index;
    ucs_status_t status;
    uct_md_h md;
    uct_md_config_t *md_config;

    status = uct_query_components(&uct_components, &num_components);
    if (status != UCS_OK) {
        goto out;
    }

    for (cmpt_index = 0; cmpt_index < num_components; ++cmpt_index) {

        /* First query: how many MD resources this component has */
        component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCE_COUNT;
        status = uct_component_query(uct_components[cmpt_index], &component_attr);
        if (status != UCS_OK) {
            goto out_release_components_list;
        }

        /* Second query: fetch the MD resource list (stack-allocated) */
        component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCES;
        component_attr.md_resources = alloca(sizeof(*component_attr.md_resources) *
                                             component_attr.md_resource_count);
        status = uct_component_query(uct_components[cmpt_index], &component_attr);
        if (status != UCS_OK) {
            goto out_release_components_list;
        }

        for (md_index = 0; md_index < component_attr.md_resource_count; ++md_index) {
            status = uct_md_config_read(uct_components[cmpt_index], NULL, NULL,
                                        &md_config);
            if (status != UCS_OK) {
                goto out_release_components_list;
            }

            /* Record the MD name being probed in the test parameters */
            ucs_strncpy_zero(perf->params.uct.md_name,
                             component_attr.md_resources[md_index].md_name,
                             UCT_MD_NAME_MAX);

            status = uct_md_open(uct_components[cmpt_index],
                                 component_attr.md_resources[md_index].md_name,
                                 md_config, &md);
            uct_config_release(md_config);
            if (status != UCS_OK) {
                goto out_release_components_list;
            }

            status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
            if (status != UCS_OK) {
                uct_md_close(md);
                goto out_release_components_list;
            }

            for (tl_index = 0; tl_index < num_tl_resources; ++tl_index) {
                if (!strcmp(perf->params.uct.tl_name, tl_resources[tl_index].tl_name) &&
                    !strcmp(perf->params.uct.dev_name, tl_resources[tl_index].dev_name))
                {
                    /* Match found: keep the open MD, release the tl list */
                    uct_release_tl_resource_list(tl_resources);
                    perf->uct.cmpt = uct_components[cmpt_index];
                    perf->uct.md   = md;
                    status         = UCS_OK;
                    goto out_release_components_list;
                }
            }

            /* Not on this MD - close it and try the next one */
            uct_md_close(md);
            uct_release_tl_resource_list(tl_resources);
        }
    }

    ucs_error("Cannot use transport %s on device %s", perf->params.uct.tl_name,
              perf->params.uct.dev_name);
    status = UCS_ERR_NO_DEVICE;

out_release_components_list:
    uct_release_component_list(uct_components);
out:
    return status;
}
/* Collective barrier over the RTE for UCT tests; the UCT worker is progressed
 * while waiting so outstanding communication can complete. */
void uct_perf_barrier(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier, (void(*)(void*))uct_worker_progress,
             (void*)perf->uct.worker);
}
/* Collective barrier over the RTE for UCP tests; the UCP worker is progressed
 * while waiting so outstanding communication can complete. */
void ucp_perf_barrier(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress,
             (void*)perf->ucp.worker);
}
/* Create all UCT-level resources for a test run: async context, worker,
 * memory domain, interface, test buffers and remote endpoints. On any
 * failure, everything created so far is released in reverse order via the
 * goto ladder at the bottom. */
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    uct_iface_config_t *iface_config;
    ucs_status_t status;
    uct_iface_params_t iface_params = {
        .field_mask           = UCT_IFACE_PARAM_FIELD_OPEN_MODE |
                                UCT_IFACE_PARAM_FIELD_STATS_ROOT |
                                UCT_IFACE_PARAM_FIELD_RX_HEADROOM |
                                UCT_IFACE_PARAM_FIELD_CPU_MASK,
        .open_mode            = UCT_IFACE_OPEN_MODE_DEVICE,
        .mode.device.tl_name  = params->uct.tl_name,
        .mode.device.dev_name = params->uct.dev_name,
        .stats_root           = ucs_stats_get_root(),
        .rx_headroom          = 0
    };

    UCS_CPU_ZERO(&iface_params.cpu_mask);

    status = ucs_async_context_init(&perf->uct.async, params->async_mode);
    if (status != UCS_OK) {
        goto out;
    }

    status = uct_worker_create(&perf->uct.async, params->thread_mode,
                               &perf->uct.worker);
    if (status != UCS_OK) {
        goto out_cleanup_async;
    }

    status = uct_perf_create_md(perf);
    if (status != UCS_OK) {
        goto out_destroy_worker;
    }

    status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
                                      NULL, &iface_config);
    if (status != UCS_OK) {
        goto out_destroy_md;
    }

    status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
                            iface_config, &perf->uct.iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        ucs_error("Failed to open iface: %s", ucs_status_string(status));
        goto out_destroy_md;
    }

    status = uct_perf_test_check_capabilities(params, perf->uct.iface,
                                              perf->uct.md);
    /* sync status across all processes */
    status = ucp_perf_test_exchange_status(perf, status);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_alloc_mem(perf);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    /* Enable progress before `uct_iface_flush` and `uct_worker_progress` called
     * to give a chance to finish connection for some tranports (ib/ud, tcp).
     * They may return UCS_INPROGRESS from `uct_iface_flush` when connections are
     * in progress */
    uct_iface_progress_enable(perf->uct.iface,
                              UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);

    status = uct_perf_test_setup_endpoints(perf);
    if (status != UCS_OK) {
        ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        goto out_free_mem;
    }

    return UCS_OK;

out_free_mem:
    uct_perf_test_free_mem(perf);
out_iface_close:
    uct_iface_close(perf->uct.iface);
out_destroy_md:
    uct_md_close(perf->uct.md);
out_destroy_worker:
    uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
    ucs_async_context_cleanup(&perf->uct.async);
out:
    return status;
}
/* Tear down all UCT resources in reverse order of uct_perf_setup():
 * endpoints, buffers, interface, MD, worker, async context. */
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
    uct_perf_test_cleanup_endpoints(perf);
    uct_perf_test_free_mem(perf);
    uct_iface_close(perf->uct.iface);
    uct_md_close(perf->uct.md);
    uct_worker_destroy(perf->uct.worker);
    ucs_async_context_cleanup(&perf->uct.async);
}
/* Create all UCP-level resources for a test run: derive the required feature
 * set from the test command, then create context, worker, buffers and remote
 * endpoints. On failure everything created so far is released in reverse
 * order via the goto ladder. */
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf)
{
    ucp_params_t ucp_params;
    ucp_worker_params_t worker_params;
    ucp_config_t *config;
    ucs_status_t status;

    ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES;
    ucp_params.features   = 0;

    status = ucp_perf_test_fill_params(&perf->params, &ucp_params);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_config_read(NULL, NULL, &config);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_init(&ucp_params, config, &perf->ucp.context);
    ucp_config_release(config);
    if (status != UCS_OK) {
        goto err;
    }

    worker_params.field_mask  = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
    worker_params.thread_mode = perf->params.thread_mode;

    status = ucp_worker_create(perf->ucp.context, &worker_params,
                               &perf->ucp.worker);
    if (status != UCS_OK) {
        goto err_cleanup;
    }

    status = ucp_perf_test_alloc_mem(perf);
    if (status != UCS_OK) {
        /* Fix: corrected message typo "alocate" -> "allocate" */
        ucs_warn("ucp test failed to allocate memory");
        goto err_destroy_worker;
    }

    status = ucp_perf_test_setup_endpoints(perf, ucp_params.features);
    if (status != UCS_OK) {
        if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        }
        goto err_free_mem;
    }

    return UCS_OK;

err_free_mem:
    ucp_perf_test_free_mem(perf);
err_destroy_worker:
    ucp_worker_destroy(perf->ucp.worker);
err_cleanup:
    ucp_cleanup(perf->ucp.context);
err:
    return status;
}
/* Tear down all UCP resources in reverse order of ucp_perf_setup():
 * endpoints first (with an extra barrier), then buffers, worker, context. */
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
    ucp_perf_test_cleanup_endpoints(perf);
    ucp_perf_barrier(perf);
    ucp_perf_test_free_mem(perf);
    ucp_worker_destroy(perf->ucp.worker);
    ucp_cleanup(perf->ucp.context);
}
/* Per-API dispatch table, indexed by the test API (UCT or UCP). */
static struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf);   /* create resources/endpoints */
    void (*cleanup)(ucx_perf_context_t *perf);         /* release them */
    ucs_status_t (*run)(ucx_perf_context_t *perf);     /* execute the measured loop */
    void (*barrier)(ucx_perf_context_t *perf);         /* collective barrier */
} ucx_perf_funcs[] = {
    [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup,
                          uct_perf_test_dispatch, uct_perf_barrier},
    [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup,
                          ucp_perf_test_dispatch, ucp_perf_barrier}
};
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
/* Public entry point: validate parameters, set up the selected API (UCT or
 * UCP), optionally run a warmup pass, run the measured test and report the
 * result via the RTE. In multi-thread mode the actual run is delegated to
 * ucx_perf_thread_spawn(). Returns the first failure encountered. */
ucs_status_t ucx_perf_run(ucx_perf_params_t *params, ucx_perf_result_t *result)
{
    ucx_perf_context_t *perf;
    ucs_status_t status;

    ucx_perf_global_init();

    if (params->command == UCX_PERF_CMD_LAST) {
        ucs_error("Test is not selected");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
        ucs_error("Invalid test API parameter (should be UCT or UCP)");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    perf = malloc(sizeof(*perf));
    if (perf == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto out;
    }

    ucx_perf_test_init(perf, params);

    /* ucx_perf_test_init() selects the allocator for params->mem_type */
    if (perf->allocator == NULL) {
        ucs_error("Unsupported memory type %s",
                  ucs_memory_type_names[params->mem_type]);
        status = UCS_ERR_UNSUPPORTED;
        goto out_free;
    }

    if ((params->api == UCX_PERF_API_UCT) &&
        (perf->allocator->mem_type != UCS_MEMORY_TYPE_HOST)) {
        ucs_warn("UCT tests also copy 2-byte values from %s memory to "
                 "%s memory, which may impact performance results",
                 ucs_memory_type_names[perf->allocator->mem_type],
                 ucs_memory_type_names[UCS_MEMORY_TYPE_HOST]);
    }

    status = perf->allocator->init(perf);
    if (status != UCS_OK) {
        goto out_free;
    }

    status = ucx_perf_funcs[params->api].setup(perf);
    if (status != UCS_OK) {
        goto out_free;
    }

    if (UCS_THREAD_MODE_SINGLE == params->thread_mode) {
        if (params->warmup_iter > 0) {
            /* Warmup pass: fewer iterations, no reporting */
            ucx_perf_set_warmup(perf, params);
            status = ucx_perf_funcs[params->api].run(perf);
            if (status != UCS_OK) {
                goto out_cleanup;
            }
            ucx_perf_funcs[params->api].barrier(perf);
            ucx_perf_test_prepare_new_run(perf, params);
        }

        /* Run test */
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (status == UCS_OK) {
            ucx_perf_calc_result(perf, result);
            rte_call(perf, report, result, perf->params.report_arg, 1);
        }
    } else {
        status = ucx_perf_thread_spawn(perf, result);
    }

out_cleanup:
    ucx_perf_funcs[params->api].cleanup(perf);
out_free:
    free(perf);
out:
    return status;
}
#if _OPENMP
/* multiple threads sharing the same worker/iface */
/* Per-thread state for OpenMP runs: all threads share one worker/iface and
 * publish their result status into the shared 'statuses' array. */
typedef struct {
    pthread_t pt;              /* NOTE(review): not referenced in this chunk - confirm before removing */
    int tid;                   /* this thread's index */
    int ntid;                  /* total number of threads */
    ucs_status_t* statuses;    /* shared array of per-thread statuses */
    ucx_perf_context_t perf;   /* thread-private copy of the test context */
    ucx_perf_result_t result;  /* thread-private result */
} ucx_perf_thread_context_t;
/* Per-thread test body for OpenMP runs: optionally execute a warmup pass,
 * then the measured run, on this thread's private perf context. Each thread
 * publishes its status in the shared 'statuses' array; after every barrier
 * all entries are checked so all threads bail out together on any failure.
 * Only the OpenMP master thread reports the result. */
static void* ucx_perf_thread_run_test(void* arg)
{
    ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg;
    ucx_perf_result_t* result = &tctx->result;
    ucx_perf_context_t* perf = &tctx->perf;
    ucx_perf_params_t* params = &perf->params;
    ucs_status_t* statuses = tctx->statuses;
    int tid = tctx->tid;
    int i;

    if (params->warmup_iter > 0) {
        ucx_perf_set_warmup(perf, params);
        statuses[tid] = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        /* Abort if any thread failed its warmup run */
        for (i = 0; i < tctx->ntid; i++) {
            if (UCS_OK != statuses[i]) {
                goto out;
            }
        }
        ucx_perf_test_prepare_new_run(perf, params);
    }

    /* Run test */
#pragma omp barrier
    statuses[tid] = ucx_perf_funcs[params->api].run(perf);
    ucx_perf_funcs[params->api].barrier(perf);
    for (i = 0; i < tctx->ntid; i++) {
        if (UCS_OK != statuses[i]) {
            goto out;
        }
    }

#pragma omp master
    {
        /* Assuming all threads are fairly treated, reporting only tid==0
           TODO: aggregate reports */
        ucx_perf_calc_result(perf, result);
        rte_call(perf, report, result, perf->params.report_arg, 1);
    }

out:
    return &statuses[tid];
}
/* Run the test with params->thread_count OpenMP threads sharing the already
 * created worker/iface. Each thread gets its own copy of *perf whose
 * send/recv buffers are offset by thread-id * message-size, so the threads
 * operate on disjoint buffer slices. Returns the last non-OK thread status,
 * or UCS_OK if all threads succeeded. */
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                          ucx_perf_result_t* result)
{
    ucx_perf_thread_context_t* tctx;
    ucs_status_t* statuses;
    size_t message_size;
    ucs_status_t status;
    int ti, nti;

    message_size = ucx_perf_get_message_size(&perf->params);
    omp_set_num_threads(perf->params.thread_count);
    nti = perf->params.thread_count;

    tctx = calloc(nti, sizeof(ucx_perf_thread_context_t));
    statuses = calloc(nti, sizeof(ucs_status_t));
    if ((tctx == NULL) || (statuses == NULL)) {
        status = UCS_ERR_NO_MEMORY;
        goto out_free;   /* free(NULL) is safe for the one that succeeded */
    }

#pragma omp parallel private(ti)
    {
        ti = omp_get_thread_num();
        tctx[ti].tid = ti;
        tctx[ti].ntid = nti;
        tctx[ti].statuses = statuses;
        tctx[ti].perf = *perf;
        /* Doctor the src and dst buffers to make them thread specific */
        tctx[ti].perf.send_buffer = UCS_PTR_BYTE_OFFSET(tctx[ti].perf.send_buffer,
                                                        ti * message_size);
        tctx[ti].perf.recv_buffer = UCS_PTR_BYTE_OFFSET(tctx[ti].perf.recv_buffer,
                                                        ti * message_size);
        tctx[ti].perf.offset = ti * message_size;
        ucx_perf_thread_run_test((void*)&tctx[ti]);
    }

    status = UCS_OK;
    for (ti = 0; ti < nti; ti++) {
        if (UCS_OK != statuses[ti]) {
            ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
                      ucs_status_string(statuses[ti]));
            status = statuses[ti];
        }
    }

out_free:
    free(statuses);
    free(tctx);
    return status;
}
#else
/* Fallback when compiled without OpenMP: multi-thread runs are rejected. */
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                          ucx_perf_result_t* result) {
    ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
    return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
/* Process-wide initialization: register the host-memory allocator in the
 * memtype allocator table and load the memtype allocator modules. Safe to
 * call more than once (registration is idempotent). */
void ucx_perf_global_init()
{
    /* Host-memory allocator backed by plain ucp/uct host alloc/free helpers */
    static ucx_perf_allocator_t host_allocator = {
        .mem_type  = UCS_MEMORY_TYPE_HOST,
        .init      = ucs_empty_function_return_success,
        .ucp_alloc = ucp_perf_test_alloc_host,
        .ucp_free  = ucp_perf_test_free_host,
        .uct_alloc = uct_perf_test_alloc_host,
        .uct_free  = uct_perf_test_free_host,
        .memcpy    = ucx_perf_test_memcpy_host,
        .memset    = memset
    };
    UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest);

    ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_HOST] = &host_allocator;

    /* FIXME Memtype allocator modules must be loaded to global scope, otherwise
     * alloc hooks, which are using dlsym() to get pointer to original function,
     * do not work. Need to use bistro for memtype hooks to fix it.
     */
    UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL);
}
|
main_fourier.c | #include<stdio.h>
#include<stdlib.h>
#include "gdal.h"
#include<omp.h>
#include "fourier.h"
#include "fillin.h"
#include "movavg.h"
#define OBSERVATION_MAX 46
#define MAXFILES 5000
//NODATA, Top and Bottom Boundary conditions
#define NODATA -32768
#define TBC 10000
#define BBC -10000
#define RC 538
/* Print the banner and command-line usage for the fourier processing tool. */
void usage()
{
    static const char *banner[] = {
        "-----------------------------------------\n",
        "--Modis Processing chain--OpenMP code----\n",
        "-----------------------------------------\n",
        "./fourier in[in,in,in...]\n",
        "\tout[out,out,out...]\n",
        "-----------------------------------------\n",
    };
    size_t idx;

    for (idx = 0; idx < sizeof(banner) / sizeof(banner[0]); ++idx) {
        printf("%s", banner[idx]);
    }
    return;
}
// double ndvi_sim[OBSERVATION_MAX] = {0.0};
// double ndvi_obs[OBSERVATION_MAX] = {1.56,1.46,1.51,1.46,1.40,1.41,1.35,1.43,1.33,1.33,1.44,1.44,1.47,1.44,1.60,1.70,1.64,1.65,1.85,2.06,2.14,2.09,2.15,2.20,2.39,2.58,2.36,2.15,2.12,2.63,2.16,2.23,2.24,2.34,2.23,2.11,2.00,1.85,2.05,1.82,1.71,1.76,1.77,1.61,1.69,1.60};
/* Pipeline driver: for N input rasters (one per time step) produce N output
 * rasters. Per pixel, the temporal signature is (1) gap-filled where values
 * fall outside [BBC, TBC], (2) smoothed with a moving average, and
 * (3) reconstructed with a Fourier (harmonic) fit.
 * Usage: ./fourier in1 ... inN out1 ... outN (equal counts required).
 * Pixel RC's intermediate signatures are printed as CSV for inspection. */
int main( int argc, char *argv[] )
{
    /* Need at least 2 inputs + 2 outputs (argc==4 fails the parity check) */
    if( argc < 4 ){
        usage();
        return 1;
    }
    //argv[0]="./fourier"
    //argv[last]=(null)
    /* The argument list must split evenly into inputs and outputs */
    if((argc-1)%2!=0){
        printf("argv[0]=%s\n",argv[0]);
        printf("argc=%i\n",argc);
        printf("argcm2=%i\n",argc%2);
        printf("input number != output number\n");
        exit(1);
    }
    char *in,*out;
    int i,j, length = (argc-1)/2;   // number of input (and output) rasters
    int vegetated_seasons=4;
    int imagesperyear=23;//23 for 16 days data, 46 for 8 days data
    // Number of harmonics scales with the number of years covered
    int harmonic_number = vegetated_seasons*length/imagesperyear;
    printf("harmonic_number=%i\n",harmonic_number);
    double t_obs[MAXFILES+1]; // temporal signature observed
    double t_sim[MAXFILES+1]; // temporal signature simulated
    double t_fil[MAXFILES+1]; // temporal signature filled in
    double t_avg[MAXFILES+1]; // temporal signature moving averaged
    GDALAllRegister();
    GDALDatasetH hD[MAXFILES+1];
    GDALDatasetH hDOut[MAXFILES+1];
    GDALRasterBandH hB[MAXFILES+1];
    GDALRasterBandH hBOut[MAXFILES+1];
    // Open every input dataset and keep a handle to its first band
    for(i=0;i<length;i++){
        in=argv[i+1];
        // printf("i=%i / length = %i\n",i, length);
        // printf("in=%s\n",in);
        hD[i] = GDALOpen(in,GA_ReadOnly);
        if(hD[i]==NULL){
            printf("%s could not be loaded\n",in);
            exit(1);
        }
        hB[i] = GDALGetRasterBand(hD[i],1);
    }
    // Outputs are tiled/DEFLATE copies of the first input's layout
    GDALDriverH hDr = GDALGetDatasetDriver(hD[0]);
    char **options = NULL;
    options = CSLSetNameValue( options, "TILED", "YES" );
    options = CSLSetNameValue( options, "COMPRESS", "DEFLATE" );
    options = CSLSetNameValue( options, "PREDICTOR", "2" );
    for(i=length+1;i<argc;i++){
        j=i-length-1;   // output index 0..length-1
        out=argv[i];
        // printf("j=%i / length = %i\n",j, length);
        // printf("out=%s\n",out);
        hDOut[j] = GDALCreateCopy(hDr,out,hD[0],FALSE,options,NULL,NULL);
        hBOut[j] = GDALGetRasterBand(hDOut[j],1);
    }
    // NOTE(review): sizes are taken from band index 1, not 0 - valid because
    // length >= 2 here, but band 0 would be the safer choice; confirm intent.
    int nX = GDALGetRasterBandXSize(hB[1]);
    int nY = GDALGetRasterBandYSize(hB[1]);
    int N = nX*nY;
    float *l[MAXFILES+1];      // input pixel arrays, one per time step
    float *lOut[MAXFILES+1];   // output pixel arrays, one per time step
    int rowcol=N;
    //Load all images into RAM (Careful with that!)
    // NOTE(review): malloc results are not checked for NULL - confirm inputs
    // are small enough, or add checks.
    for(i=0;i<length;i++){
        lOut[i] = (float *) malloc(sizeof(float)*N);
        for(rowcol=0;rowcol<N;rowcol++){
            lOut[i][rowcol] = 0.0;
        }
        l[i] = (float *) malloc(sizeof(float)*N);
        GDALRasterIO(hB[i],GF_Read,0,0,nX,nY,l[i],nX,nY,GDT_Float32,0,0);
        GDALRasterIO(hBOut[i],GF_Read,0,0,nX,nY,lOut[i],nX,nY,GDT_Float32,0,0);
    }
    // printf("FILL IN NODATA\n");
    // PART 1: FILL IN - per pixel: mark out-of-range samples as NODATA,
    // interpolate them, then apply a moving average.
    int countNODATA=0;
    #pragma omp parallel for default(none) \
        shared(l, lOut, length, harmonic_number, N) \
        private (rowcol,t_obs,t_sim,t_fil,t_avg,i,countNODATA)
    for(rowcol=0;rowcol<N;rowcol++){
        // printf("%i / %i\n",rowcol, N);
        countNODATA=0;
        // Build the observed signature; flag values outside [BBC, TBC]
        for(i=0;i<length;i++){
            t_sim[i]=0.0;
            t_obs[i]=l[i][rowcol];
            if(t_obs[i]>TBC||t_obs[i]<BBC){
                t_obs[i]=NODATA;
                countNODATA++;
            }
        }
        if(rowcol==RC){
            printf("NOData,");
            for(i=0;i<length;i++){
                printf("%f,",t_obs[i]);
            }
            printf("\n");
        }
        // Only run the fill-in when the signature actually has gaps
        if(countNODATA){
            // printf("rowcol=%i\n",rowcol);
            fillin(t_sim,t_obs,length,NODATA);
            countNODATA=0;
        }
        // Merge: keep observed values, use simulated ones where missing
        for(i=0;i<length;i++){
            if(t_obs[i]==NODATA){
                t_fil[i]=t_sim[i];
            } else {
                t_fil[i]=t_obs[i];
            }
        }
        if(rowcol==RC){
            printf("FilledInData,");
            for(i=0;i<length;i++){
                printf("%f,",t_fil[i]);
            }
            printf("\n");
        }
        movavg(t_avg,t_fil,length);
        if(rowcol==RC){
            printf("MovAvgData,");
            for(i=0;i<length;i++){
                printf("%f,",t_avg[i]);
            }
            printf("\n");
        }
        for(i=0;i<length;i++){
            lOut[i][rowcol]=t_avg[i];
        }
    }
    #pragma omp barrier
    // printf("FOURIER\n");
    // PART 2: FOURIER - fit harmonics to the smoothed signature, per pixel
    #pragma omp parallel for default(none) \
        shared(l, lOut, length, harmonic_number, N) \
        private (rowcol,t_obs,t_sim,i)
    for(rowcol=0;rowcol<N;rowcol++){
        // printf("%i / %i\n",rowcol, N);
        for(i=0;i<length;i++){
            t_sim[i]=0.0;
            t_obs[i]=lOut[i][rowcol];
        }
        fourier(t_sim,t_obs,length,harmonic_number);
        for(i=0;i<length;i++){
            lOut[i][rowcol]=t_sim[i];
        }
        if(rowcol==RC){
            printf("FourierData,");
            for(i=0;i<length;i++){
                printf("%f,",t_sim[i]);
            }
            printf("\n");
        }
    }
    #pragma omp barrier
    // Write results and release per-image resources
    for(i=0;i<length;i++){
        GDALRasterIO(hBOut[i],GF_Write,0,0,nX,nY,lOut[i],nX,nY,GDT_Float32,0,0);
        if(l[i]!= NULL) free( l[i] );
        if(lOut[i]!= NULL) free( lOut[i] );
        if(hD[i]!=NULL) GDALClose(hD[i]);
        if(hDOut[i]!=NULL) GDALClose(hDOut[i]);
    }
    return(EXIT_SUCCESS);
}
|
parallel_for_lastprivate.c | #include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
/* Checks the OpenMP 'lastprivate' clause on a combined parallel-for:
 * after the loop, i0 must hold the value written by the sequentially-last
 * iteration (LOOPCOUNT), while 'sum' is combined via reduction(+).
 * Returns 1 on success, 0 on failure; 'logFile' is not used here. */
int
check_parallel_for_lastprivate (FILE * logFile)
{
    int sum = 0;
    /*int sum0=0; */
    int known_sum;
    int i;
    int i0 = -1;    /* sentinel: must be overwritten by lastprivate copy-out */
#pragma omp parallel for reduction(+:sum) schedule(static,7) lastprivate(i0)
    for (i = 1; i <= LOOPCOUNT; i++)
    {
        sum = sum + i;
        i0 = i;
    }   /*end of for */
    /* end of parallel */
    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    return ((known_sum == sum) && (i0 == LOOPCOUNT));
}   /* end of check_parallel_for_lastprivate */
/* Crosscheck variant: 'lastprivate(i0)' is replaced by 'private(i0)', so i0
 * is NOT copied back after the loop and keeps its initial value (-1). The
 * i0 == LOOPCOUNT check is therefore expected to fail, proving the positive
 * test above really exercises 'lastprivate' and not chance ordering. */
int
crosscheck_parallel_for_lastprivate (FILE * logFile)
{
    int sum = 0;
    /*int sum0=0; */
    int known_sum;
    int i;
    int i0 = -1;    /* stays -1: private(i0) performs no copy-out */
#pragma omp parallel for reduction(+:sum) schedule(static,7) private(i0)
    for (i = 1; i <= LOOPCOUNT; i++)
    {
        sum = sum + i;
        i0 = i;
    }   /*end of for */
    /* end of parallel */
    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    return ((known_sum == sum) && (i0 == LOOPCOUNT));
}   /* end of crosscheck_parallel_for_lastprivate */
|
private-clauseModificado3.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* OpenMP teaching example: sums a[0..n-1] inside a parallel region.
 * NOTE(review): 'suma' is shared across threads and is both reset and
 * accumulated without synchronization - this is a data race, presumably left
 * intentionally so students observe varying results; confirm before "fixing".
 * ('void main' is also non-standard; 'int main' is the portable form.) */
void main(){
    int i, n=7;
    int a[n], suma;      // a[i] = i; suma accumulates the (racy) sum
    for(i=0;i<n;i++){
        a[i]=i;
    }
#pragma omp parallel
    {
        suma=0;          // every thread rewrites the shared accumulator
        #pragma omp for
        for(i=0;i<n;i++){
            suma = suma + a[i];
            printf("\nthread %d suma a[%d] / ", omp_get_thread_num(), i);
        }
        printf("\n* thread %d suma= %d", omp_get_thread_num(), suma);
    }
    printf("\n");
}
|
mpc_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_MPC_CONTACT_CRITERIA_H)
#define KRATOS_MPC_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
#include "utilities/variable_utils.h"
#include "custom_utilities/contact_utilities.h"
#include "processes/simple_mortar_mapper_wrapper_process.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class MPCContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Custom convergence criteria for the contact problem
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class MPCContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of MPCContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( MPCContactCriteria );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The components containers
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ModelPart::MasterSlaveConstraintContainerType ConstraintArrayType;
/// The table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
// Geometry definition
typedef Node<3> NodeType;
typedef Geometry<NodeType> GeometryType;
typedef CouplingGeometry<NodeType> CouplingGeometryType;
///@}
///@name Life Cycle
///@{
/// Default constructor
explicit MPCContactCriteria()
    : BaseType()
{
}

/// Copy constructor
MPCContactCriteria( MPCContactCriteria const& rOther )
    : BaseType(rOther)
{
}

/// Destructor
~MPCContactCriteria() override = default;
///@}
///@name Operators
///@{
/**
 * @brief Criteria called before getting the solution.
 * @details On the "Contact" sub model part: saves the current WEIGHTED_GAP
 * into the previous buffer position, resets the nodal CONTACT_FORCE,
 * recomputes the weighted gap and zeroes the non-historical NODAL_AREA.
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables)
 * @param rb RHS vector (residual)
 * @return Always true (no convergence decision is taken at this stage)
 */
bool PreCriteria(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    BaseType::PreCriteria(rModelPart, rDofSet, rA, rDx, rb);

    // Auxiliary zero array
    const array_1d<double, 3> zero_array = ZeroVector(3);

    // We initialize the contact force on the contact interface nodes
    NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes();
    const auto it_node_begin = r_nodes_array.begin();

    // We save the current WEIGHTED_GAP in the buffer and reset the CONTACT_FORCE
    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
        auto it_node = it_node_begin + i;
        it_node->SetValue(CONTACT_FORCE, zero_array);
        it_node->FastGetSolutionStepValue(WEIGHTED_GAP, 1) = it_node->FastGetSolutionStepValue(WEIGHTED_GAP);
    }

    // Compute weighted gap
    ComputeWeightedGap(rModelPart);

    // Reset the NODAL_AREA
    VariableUtils().SetNonHistoricalVariableToZero(NODAL_AREA, r_nodes_array);

    return true;
}
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    // We call the base class
    BaseType::PostCriteria(rModelPart, rDofSet, rA, rDx, rb);
    // Getting process info
    const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    // Only check once the non-linear loop has actually started
    if (r_process_info[NL_ITERATION_NUMBER] > 0) {
        // Getting REACTION_CHECK_STIFFNESS_FACTOR (scales the activation threshold below)
        const double reaction_check_stiffness_factor = r_process_info.Has(REACTION_CHECK_STIFFNESS_FACTOR) ? r_process_info.GetValue(REACTION_CHECK_STIFFNESS_FACTOR) : 1.0e-12;
        // Compute weighted gap
        ComputeWeightedGap(rModelPart);
        // Transfer reaction from master to slave
        std::size_t sub_contact_counter = 0;
        CounterContactModelParts(rModelPart, sub_contact_counter);
        // Mapping reaction (negative coefficient: the mapped slave reaction balances the master one)
        Parameters mapping_parameters = Parameters(R"({"distance_threshold" : 1.0e24, "update_interface" : false, "origin_variable" : "REACTION", "mapping_coefficient" : -1.0e0})" );
        if (r_process_info.Has(DISTANCE_THRESHOLD)) {
            mapping_parameters["distance_threshold"].SetDouble(r_process_info[DISTANCE_THRESHOLD]);
        }
        auto& r_contact_model_part = rModelPart.GetSubModelPart("Contact");
        // NOTE(review): assumes pairs are named "ContactSub<i>"/"MasterSubModelPart<i>"/"SlaveSubModelPart<i>" with contiguous indices from 0 -- confirm against the model part creation
        for (std::size_t i_contact = 0; i_contact < sub_contact_counter; ++i_contact) {
            auto& r_sub = r_contact_model_part.GetSubModelPart("ContactSub" + std::to_string(i_contact));
            auto& r_sub_master = r_sub.GetSubModelPart("MasterSubModelPart" + std::to_string(i_contact));
            auto& r_sub_slave = r_sub.GetSubModelPart("SlaveSubModelPart" + std::to_string(i_contact));
            SimpleMortarMapperProcessWrapper(r_sub_master, r_sub_slave, mapping_parameters).Execute();
        }
        // TODO: Add frictional check
        // Pick one set of properties defining YOUNG_MODULUS (the last matching element wins)
        Properties::Pointer p_properties = rModelPart.Elements().begin()->pGetProperties();
        for (auto& r_elements : rModelPart.Elements()) {
            if (r_elements.pGetProperties()->Has(YOUNG_MODULUS)) {
                p_properties = r_elements.pGetProperties();
            }
        }
        // Counters of nodes whose active/slip status changed this iteration (0 changes => converged)
        IndexType is_active_set_converged = 0, is_slip_converged = 0;
        // Checking just after first iteration
        // We get the YOUNG_MODULUS
        const double young_modulus = p_properties->Has(YOUNG_MODULUS) ? p_properties->GetValue(YOUNG_MODULUS) : 0.0;
        // Threshold below which a (negative) contact pressure counts as compression and activates the node
        const double auxiliar_check = young_modulus > 0.0 ? -(reaction_check_stiffness_factor * young_modulus) : 0.0;
        // We check the active/inactive set during the first non-linear iteration or for the general semi-smooth case
        NodesArrayType& r_nodes_array = r_contact_model_part.Nodes();
        const auto it_node_begin = r_nodes_array.begin();
        // If frictionless or mesh tying
        if (rModelPart.IsNot(SLIP)) {
            #pragma omp parallel for reduction(+:is_active_set_converged)
            for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
                auto it_node = it_node_begin + i;
                if (it_node->Is(SLAVE)) {
                    // The contact force corresponds with the reaction in the normal direction
                    const array_1d<double, 3>& r_total_force = it_node->FastGetSolutionStepValue(REACTION);
                    const double nodal_area = it_node->Has(NODAL_AREA) ? it_node->GetValue(NODAL_AREA) : 1.0;
                    const double gap = it_node->FastGetSolutionStepValue(WEIGHTED_GAP)/nodal_area;
                    const array_1d<double, 3>& r_normal = it_node->FastGetSolutionStepValue(NORMAL);
                    const double contact_force = inner_prod(r_total_force, r_normal);
                    // NOTE(review): assumes NODAL_MAUX/NODAL_PAUX are set and non-zero on slave nodes -- confirm upstream
                    const double contact_pressure = contact_force/it_node->GetValue(NODAL_MAUX);
                    if (contact_pressure < auxiliar_check || gap < 0.0) { // NOTE: This could be conflictive (< or <=)
                        // We save the contact force
                        it_node->SetValue(CONTACT_FORCE, contact_force/it_node->GetValue(NODAL_PAUX) * r_normal);
                        it_node->SetValue(NORMAL_CONTACT_STRESS, contact_pressure);
                        if (it_node->IsNot(ACTIVE)) {
                            it_node->Set(ACTIVE, true);
                            is_active_set_converged += 1;
                        }
                    } else {
                        if (it_node->Is(ACTIVE)) {
                            it_node->Set(ACTIVE, false);
                            is_active_set_converged += 1;
                        }
                    }
                }
            }
        } else { // If frictional
            #pragma omp parallel for reduction(+:is_active_set_converged, is_slip_converged)
            for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
                auto it_node = it_node_begin + i;
                if (it_node->Is(SLAVE)) {
                    // Shadows the outer auxiliar_check with the identical expression; redundant but harmless
                    const double auxiliar_check = young_modulus > 0.0 ? -(reaction_check_stiffness_factor * young_modulus) : 0.0;
                    // The contact force corresponds with the reaction in the normal direction
                    const array_1d<double, 3>& r_total_force = it_node->FastGetSolutionStepValue(REACTION);
                    const double nodal_area = it_node->Has(NODAL_AREA) ? it_node->GetValue(NODAL_AREA) : 1.0;
                    const double gap = it_node->FastGetSolutionStepValue(WEIGHTED_GAP)/nodal_area;
                    const array_1d<double, 3>& r_normal = it_node->FastGetSolutionStepValue(NORMAL);
                    const double contact_force = inner_prod(r_total_force, r_normal);
                    const double normal_contact_pressure = contact_force/it_node->GetValue(NODAL_MAUX);
                    if (normal_contact_pressure < auxiliar_check || gap < 0.0) { // NOTE: This could be conflictive (< or <=)
                        // We save the contact force
                        it_node->SetValue(CONTACT_FORCE, r_total_force/it_node->GetValue(NODAL_PAUX));
                        it_node->SetValue(NORMAL_CONTACT_STRESS, normal_contact_pressure);
                        if (it_node->IsNot(ACTIVE)) {
                            it_node->Set(ACTIVE, true);
                            is_active_set_converged += 1;
                        }
                        // The friction coefficient (tangential component vs Coulomb limit)
                        const double tangential_contact_pressure = norm_2(r_total_force - contact_force * r_normal)/it_node->GetValue(NODAL_MAUX);
                        const bool is_slip = it_node->Is(SLIP);
                        const double mu = it_node->GetValue(FRICTION_COEFFICIENT);
                        if (tangential_contact_pressure <= - mu * contact_force) { // STICK CASE // TODO: Check the <=
                            it_node->SetValue(TANGENTIAL_CONTACT_STRESS, tangential_contact_pressure);
                            if (is_slip) {
                                it_node->Set(SLIP, false);
                                is_slip_converged += 1;
                            }
                        } else { // SLIP CASE
                            it_node->SetValue(TANGENTIAL_CONTACT_STRESS, - mu * contact_force);
                            if (!is_slip) {
                                it_node->Set(SLIP, true);
                                is_slip_converged += 1;
                            }
                        }
                    } else {
                        if (it_node->Is(ACTIVE)) {
                            it_node->Set(ACTIVE, false);
                            it_node->Reset(SLIP);
                            is_active_set_converged += 1;
                        }
                    }
                }
            }
        }
        // We set the constraints active and inactive in function of the active set
        ConditionsArrayType& r_conditions_array = rModelPart.GetSubModelPart("ComputingContact").Conditions();
        auto it_cond_begin = r_conditions_array.begin();
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
            auto it_cond = it_cond_begin + i;
            // NOTE(review): the name says slave but the Master part of the coupling geometry is taken -- confirm the intended convention
            const auto& r_slave_geometry = it_cond->GetGeometry().GetGeometryPart(CouplingGeometryType::Master);
            std::size_t counter = 0;
            for (auto& r_node : r_slave_geometry) {
                if (r_node.IsNot(ACTIVE)) {
                    ++counter;
                }
            }
            // In case of traction (every node inactive) we deactivate
            if (counter == r_slave_geometry.size()) {
                it_cond->Set(ACTIVE, false);
                // We deactivate the constraints on inactive conditions
                if (it_cond->Has(CONSTRAINT_POINTER)) {
                    auto p_const = it_cond->GetValue(CONSTRAINT_POINTER);
                    // In case of traction we deactivate
                    p_const->Set(ACTIVE, false);
                } else {
                    KRATOS_ERROR << "Contact conditions must have defined CONSTRAINT_POINTER" << std::endl;
                }
            }
        }
        // We save to the process info if the active set has converged (zero status changes)
        const bool active_set_converged = (is_active_set_converged == 0 ? true : false);
        const bool slip_set_converged = (is_slip_converged == 0 ? true : false);
        if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
            if (active_set_converged) {
                KRATOS_INFO("MPCContactCriteria") << BOLDFONT("\tActive set") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
            } else {
                KRATOS_INFO("MPCContactCriteria") << BOLDFONT("\tActive set") << " convergence is " << BOLDFONT(FRED("not achieved")) << std::endl;
            }
            if (slip_set_converged) {
                KRATOS_INFO("MPCContactCriteria") << BOLDFONT("\tSlip set") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
            } else {
                KRATOS_INFO("MPCContactCriteria") << BOLDFONT("\tSlip set") << " convergence is " << BOLDFONT(FRED("not achieved")) << std::endl;
            }
        }
        return (active_set_converged && slip_set_converged);
    }
    return true;
}
/**
* @brief This function initialize the convergence criteria
* @param rModelPart The model part of interest
*/
void Initialize(ModelPart& rModelPart) override
{
    // No additional state to set up: defer entirely to the base convergence criteria
    BaseType::Initialize(rModelPart);
}
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
 * @brief Resets and recomputes the weighted gap on the contact nodes
 * @details For frictional (SLIP) problems the weighted slip is reset as well;
 * the contributions are then accumulated from the computing contact conditions
 * @param rModelPart Reference to the ModelPart containing the contact problem
 */
void ComputeWeightedGap(ModelPart& rModelPart)
{
    NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes();

    // The weighted gap is always cleared; the weighted slip only matters for frictional problems
    VariableUtils().SetHistoricalVariableToZero(WEIGHTED_GAP, r_nodes_array);
    if (rModelPart.Is(SLIP)) {
        VariableUtils().SetHistoricalVariableToZero(WEIGHTED_SLIP, r_nodes_array);
    }

    // Accumulate the nodal contributions from the computing contact conditions
    ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact"));
}
/**
 * @brief Recursively counts the "ContactSub" sub model parts (excluding "ComputingContactSub" ones)
 * @param rModelPart The model part whose sub model part tree is inspected
 * @param rCounter Accumulated number of contact sub model parts found
 */
void CounterContactModelParts(
    ModelPart& rModelPart,
    std::size_t& rCounter
    )
{
    for (auto& r_name : rModelPart.GetSubModelPartNames()) {
        const bool is_contact_sub = (r_name.find("ContactSub") != std::string::npos);
        const bool is_computing_sub = (r_name.find("ComputingContactSub") != std::string::npos);
        if (is_contact_sub && !is_computing_sub) {
            ++rCounter;
        }
        // Recurse into the sub model part tree
        auto& r_sub_model_part = rModelPart.GetSubModelPart(r_name);
        if (r_sub_model_part.IsSubModelPart()) {
            CounterContactModelParts(r_sub_model_part, rCounter);
        }
    }
}
///@}
///@name Private Access
///@{
///@}
///@name Serialization
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Class MPCContactCriteria
///@name Explicit Specializations
///@{
} // namespace Kratos
#endif /* KRATOS_MPC_CONTACT_CRITERIA_H defined */
|
floorplan.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
/* Original code from the Application Kernel Matrix by Cray */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "app-desc.h"
#include "bots.h"
#define ROWS 64
#define COLS 64
#define DMAX 64
/* Fully parenthesized so that expression arguments (e.g. bitwise or
 * comparison operators) are not broken by operator precedence; note the
 * arguments are still evaluated twice, so avoid side effects. */
#define max(a, b) (((a) > (b)) ? (a) : (b))
#define min(a, b) (((a) < (b)) ? (a) : (b))
int solution = -1;
typedef int coor[2];
typedef char ibrd[ROWS][COLS];
typedef char (*pibrd)[COLS];
FILE * inputFile;
struct cell {
int n;
coor *alt;
int top;
int bot;
int lhs;
int rhs;
int left;
int above;
int next;
};
struct cell * gcells;
int MIN_AREA;
ibrd BEST_BOARD;
coor MIN_FOOTPRINT;
int N;
/* compute all possible locations for nw corner for cell */
/* Enumerate every feasible north-west corner position for cell 'id'
   realized with alternative 'shape'.  Candidate (row, col) pairs are
   written to NWS and their count is returned (0 if no placement works). */
static int starts(int id, int shape, coor *NWS, struct cell *cells) {
  int i, n, top, bot, lhs, rhs;
  int rows, cols, left, above;

/* size of cell */
  rows = cells[id].alt[shape][0];
  cols = cells[id].alt[shape][1];

/* the cells to the left and above */
  left = cells[id].left;
  above = cells[id].above;

/* if there is a vertical and horizontal dependence */
  if ((left >= 0) && (above >= 0)) {

     top = cells[above].bot + 1;
     lhs = cells[left].rhs + 1;
     bot = top + rows;
     rhs = lhs + cols;

/* if footprint of cell touches the cells to the left and above */
     if ((top <= cells[left].bot) && (bot >= cells[left].top) &&
         (lhs <= cells[above].rhs) && (rhs >= cells[above].lhs))
          { n = 1; NWS[0][0] = top; NWS[0][1] = lhs; }
     else { n = 0; }

/* if there is only a horizontal dependence */
   } else if (left >= 0) {

     /* highest initial row is top of cell to the left - rows */
     top = max(cells[left].top - rows + 1, 0);
     /* lowest initial row is bottom of cell to the left */
     bot = min(cells[left].bot, ROWS);
     n = bot - top + 1;

     for (i = 0; i < n; i++) {
         NWS[i][0] = i + top;
         NWS[i][1] = cells[left].rhs + 1;
     }

  } else {
     /* NOTE(review): this branch dereferences cells[above], so it assumes
        'above >= 0' whenever 'left < 0' (every cell has at least one
        dependence) -- confirm against the input format. */

     /* leftmost initial col is lhs of cell above - cols */
     lhs = max(cells[above].lhs - cols + 1, 0);
     /* rightmost initial col is rhs of cell above */
     rhs = min(cells[above].rhs, COLS);
     n = rhs - lhs + 1;

     for (i = 0; i < n; i++) {
         NWS[i][0] = cells[above].bot + 1;
         NWS[i][1] = i + lhs;
     } }

  return (n);
}
/* Write cell 'id' onto 'board' over the rectangle bounded by the cell's
   top/bot/lhs/rhs edges (inclusive).  Returns 1 on success, or 0 as soon
   as an already-occupied square is found (the cell cannot be laid down;
   squares visited before the conflict remain written, matching the
   caller's use of a scratch copy of the board). */
static int lay_down(int id, ibrd board, struct cell *cells) {
  int row, col;
  int top, bot, lhs, rhs;

  top = cells[id].top;
  bot = cells[id].bot;
  lhs = cells[id].lhs;
  rhs = cells[id].rhs;

  for (row = top; row <= bot; row++) {
    for (col = lhs; col <= rhs; col++) {
      if (board[row][col] != 0)
        return (0);
      board[row][col] = (char) id;
    }
  }

  return (1);
}
/* Read one decimal integer from 'file' into 'var'; abort on malformed or
 * truncated input.  fscanf() returns the number of items converted, so
 * anything other than 1 (including EOF and a failed conversion, which the
 * previous '== EOF' test missed, leaving 'var' uninitialized) is an error.
 * Wrapped in do/while(0) so the macro expands to a single statement and is
 * safe inside unbraced if/else bodies. */
#define read_integer(file,var) \
  do { \
    if ( fscanf(file, "%d", &var) != 1 ) {\
      bots_message(" Bogus input file\n");\
      exit(-1);\
    }\
  } while (0)
/* Read the problem instance from the global 'inputFile':
 *   cell count N; then for each cell its number of alternative shapes,
 *   the (rows, cols) of each shape, and its left/above/next links;
 *   finally an optional trailing integer with the known optimal area. */
static void read_inputs() {
  int i, j, n;

  read_integer(inputFile,n);
  N = n;

  /* gcells[0] is a sentinel; real cells occupy indices 1..n */
  gcells = (struct cell *) malloc((n + 1) * sizeof(struct cell));

  gcells[0].n = 0;
  gcells[0].alt = 0;
  gcells[0].top = 0;
  gcells[0].bot = 0;
  gcells[0].lhs = -1;
  gcells[0].rhs = -1;
  gcells[0].left = 0;
  gcells[0].above = 0;
  gcells[0].next = 0;

  for (i = 1; i < n + 1; i++) {
      read_integer(inputFile, gcells[i].n);
      /* NOTE(review): malloc results are unchecked -- tolerable for a
         benchmark, but a failure here would crash on the next read. */
      gcells[i].alt = (coor *) malloc(gcells[i].n * sizeof(coor));

      for (j = 0; j < gcells[i].n; j++) {
          read_integer(inputFile, gcells[i].alt[j][0]);
          read_integer(inputFile, gcells[i].alt[j][1]);
      }

      read_integer(inputFile, gcells[i].left);
      read_integer(inputFile, gcells[i].above);
      read_integer(inputFile, gcells[i].next);
  }

  /* Optional reference solution, used later by floorplan_verify() */
  if (!feof(inputFile)) {
      read_integer(inputFile, solution);
  }
}
/* Print the best floorplan found: the minimum area followed by an ASCII
   rendering of BEST_BOARD, one letter per cell and a blank for each
   empty square. */
static void write_outputs() {
  int row, col;

  bots_message("Minimum area = %d\n\n", MIN_AREA);

  for (row = 0; row < MIN_FOOTPRINT[0]; row++) {
    for (col = 0; col < MIN_FOOTPRINT[1]; col++) {
      if (BEST_BOARD[row][col] == 0) {
        bots_message(" ");
      } else {
        bots_message("%c", 'A' + BEST_BOARD[row][col] - 1);
      }
    }
    bots_message("\n");
  }
}
#ifdef MANUAL_CUTOFF
/* Serial (task-free) branch of the floorplan search, used below the manual
 * cutoff depth.  Tries every shape and NW-corner placement of cell 'id' on
 * BOARD, recursing on the next cell; updates the global minimum
 * (MIN_AREA/MIN_FOOTPRINT/BEST_BOARD) under an OpenMP critical section
 * because sibling tasks may race on it.  Returns the number of
 * search-tree nodes visited. */
static int add_cell_ser (int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS) {
  int i, j, nn, nn2, area;

  ibrd board;
  coor footprint, NWS[DMAX];

  nn2 = 0;

/* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
/* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nn2 += nn;
/* for all possible locations */
    for (j = 0; j < nn; j++) {
      /* No per-call copy here: the serial search mutates CELLS in place,
         which is safe because each recursion level rewrites cells[id]
         before using it and the traversal is depth-first. */
      struct cell *cells = CELLS;
/* extent of shape */
      cells[id].top = NWS[j][0];
      cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
      cells[id].lhs = NWS[j][1];
      cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;

      memcpy(board, BOARD, sizeof(ibrd));

/* if the cell cannot be layed down, prune search */
      if (! lay_down(id, board, cells)) {
        bots_debug("Chip %d, shape %d does not fit\n", id, i);
        goto _end;
      }

/* calculate new footprint of board and area of footprint */
      footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
      footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
      area = footprint[0] * footprint[1];

/* if last cell */
      if (cells[id].next == 0) {

/* if area is minimum, update global values (double-checked under critical) */
        if (area < MIN_AREA) {
#pragma omp critical
          if (area < MIN_AREA) {
            MIN_AREA = area;
            MIN_FOOTPRINT[0] = footprint[0];
            MIN_FOOTPRINT[1] = footprint[1];
            memcpy(BEST_BOARD, board, sizeof(ibrd));
            bots_debug("N %d\n", MIN_AREA);
          }
        }

/* if area is less than best area */
      } else if (area < MIN_AREA) {
/* NOTE(review): nn2 is local to this serial call, so the atomic is not
   strictly needed here -- harmless, but confirm it was intentional. */
#pragma omp atomic
        nn2 += add_cell_ser(cells[id].next, footprint, board,cells);
/* if area is greater than or equal to best area, prune search */
      } else {
        bots_debug("T %d, %d\n", area, MIN_AREA);
      }
_end:;
    }
  }

  return nn2;
}
#endif
#if defined(IF_CUTOFF)
/* Task-parallel floorplan search (IF_CUTOFF variant): one OpenMP task is
 * created per candidate placement; the if() clause makes tasks execute
 * immediately (undeferred) once the recursion depth reaches
 * bots_cutoff_value.  Each task works on private copies of the cell array
 * and the board; the shared counter 'nnc' is updated atomically and the
 * global minimum is updated under a critical section.  Returns the number
 * of search-tree nodes visited. */
static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS,int level) {
  int i, j, nn, area, nnc, nnl;

  ibrd board;
  coor footprint, NWS[DMAX];

  nnc = nnl = 0;

/* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
/* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nnl += nn;
/* for all possible locations, spawn one task per placement */
    for (j = 0; j < nn; j++) {
#pragma omp task private(board, footprint,area) \
firstprivate(NWS,i,j,id,nn,level) \
shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,nnc,bots_verbose_mode) \
if(level<bots_cutoff_value)
{
      /* Per-task private copy of the cell array (VLA of N+1 cells) */
      struct cell cells[N+1];
      memcpy(cells,CELLS,sizeof(struct cell)*(N+1));
/* extent of shape */
      cells[id].top = NWS[j][0];
      cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
      cells[id].lhs = NWS[j][1];
      cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;

      memcpy(board, BOARD, sizeof(ibrd));

/* if the cell cannot be layed down, prune search */
      if (! lay_down(id, board, cells)) {
        bots_debug("Chip %d, shape %d does not fit\n", id, i);
        goto _end;
      }

/* calculate new footprint of board and area of footprint */
      footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
      footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
      area = footprint[0] * footprint[1];

/* if last cell */
      if (cells[id].next == 0) {

/* if area is minimum, update global values (double-checked under critical) */
        if (area < MIN_AREA) {
#pragma omp critical
          if (area < MIN_AREA) {
            MIN_AREA = area;
            MIN_FOOTPRINT[0] = footprint[0];
            MIN_FOOTPRINT[1] = footprint[1];
            memcpy(BEST_BOARD, board, sizeof(ibrd));
            bots_debug("N %d\n", MIN_AREA);
          }
        }

/* if area is less than best area, recurse on the next cell */
      } else if (area < MIN_AREA) {
#pragma omp atomic
        nnc += add_cell(cells[id].next, footprint, board,cells,level+1);
/* if area is greater than or equal to best area, prune search */
      } else {
        bots_debug("T %d, %d\n", area, MIN_AREA);
      }
_end:;
    }
  }
}
/* Wait for all child tasks so the shared counter 'nnc' is final */
#pragma omp taskwait
  return nnc+nnl;
}
#elif defined(FINAL_CUTOFF)
/* Task-parallel floorplan search (FINAL_CUTOFF variant): tasks are marked
 * final() once the recursion depth reaches bots_cutoff_value, so all their
 * descendants run serially (and may be merged).  Inside a final task the
 * cell array is not copied -- the parent's CELLS is reused, exploiting the
 * serial in-place-mutation property of the search.  Returns the number of
 * search-tree nodes visited. */
static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS,int level) {
  int i, j, nn, area, nnc, nnl;

  coor footprint, NWS[DMAX];

  nnc = nnl = 0;

/* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
/* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nnl += nn;
/* for all possible locations, spawn one task per placement */
    for (j = 0; j < nn; j++) {
#pragma omp task private(footprint,area) \
firstprivate(NWS,i,j,id,nn,level,bots_cutoff_value) \
shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,nnc,bots_verbose_mode) \
final(level >= bots_cutoff_value) mergeable
{
      ibrd board;
      struct cell *cells;
      /* NOTE(review): the reuse test uses '>' while the final() clause uses
         '>=', so at level == bots_cutoff_value a final task still copies
         CELLS.  Conservative (safe), but confirm it matches the intent. */
      if ( omp_in_final() && level > bots_cutoff_value ) {
        /* Deep in a final (serial) subtree: mutate the parent's array in place */
        cells = CELLS;
      } else {
        /* Otherwise take a private stack copy of the N+1 cells */
        cells = alloca(sizeof(struct cell)*(N+1));
        memcpy(cells,CELLS,sizeof(struct cell)*(N+1));
      }
/* extent of shape */
      cells[id].top = NWS[j][0];
      cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
      cells[id].lhs = NWS[j][1];
      cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;

      memcpy(board, BOARD, sizeof(ibrd));

/* if the cell cannot be layed down, prune search */
      if (! lay_down(id, board, cells)) {
        bots_debug("Chip %d, shape %d does not fit\n", id, i);
        goto _end;
      }

/* calculate new footprint of board and area of footprint */
      footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
      footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
      area = footprint[0] * footprint[1];

/* if last cell */
      if (cells[id].next == 0) {

/* if area is minimum, update global values (double-checked under critical) */
        if (area < MIN_AREA) {
#pragma omp critical
          if (area < MIN_AREA) {
            MIN_AREA = area;
            MIN_FOOTPRINT[0] = footprint[0];
            MIN_FOOTPRINT[1] = footprint[1];
            memcpy(BEST_BOARD, board, sizeof(ibrd));
            bots_debug("N %d\n", MIN_AREA);
          }
        }

/* if area is less than best area, recurse on the next cell */
      } else if (area < MIN_AREA) {
#pragma omp atomic
        nnc += add_cell(cells[id].next, footprint, board,cells,level+1);
/* if area is greater than or equal to best area, prune search */
      } else {
        bots_debug("T %d, %d\n", area, MIN_AREA);
      }
_end:;
    }
  }
}
/* Wait for all child tasks so the shared counter 'nnc' is final */
#pragma omp taskwait
  return nnc+nnl;
}
#elif defined(MANUAL_CUTOFF)
/* Task-parallel floorplan search (MANUAL_CUTOFF variant): every placement
 * spawns a task, but once the next level would reach bots_cutoff_value the
 * recursion switches to the serial add_cell_ser() instead of spawning more
 * tasks.  Each task works on a private alloca'd copy of the cell array and
 * a private board.  Returns the number of search-tree nodes visited. */
static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS,int level) {
  int i, j, nn, area, nnc, nnl;

  ibrd board;
  coor footprint, NWS[DMAX];

  nnc = nnl = 0;

/* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
/* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nnl += nn;
/* for all possible locations, spawn one task per placement */
    for (j = 0; j < nn; j++) {
#pragma omp task private(board, footprint,area) \
firstprivate(NWS,i,j,id,nn,level,bots_cutoff_value) shared(nnc) \
shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,bots_verbose_mode)
{
      /* Per-task private stack copy of the N+1 cells */
      struct cell *cells;
      cells = (struct cell*)alloca(sizeof(struct cell)*(N+1));
      memcpy(cells,CELLS,sizeof(struct cell)*(N+1));
/* extent of shape */
      cells[id].top = NWS[j][0];
      cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
      cells[id].lhs = NWS[j][1];
      cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;

      memcpy(board, BOARD, sizeof(ibrd));

/* if the cell cannot be layed down, prune search */
      if (! lay_down(id, board, cells)) {
        bots_debug("Chip %d, shape %d does not fit\n", id, i);
        goto _end;
      }

/* calculate new footprint of board and area of footprint */
      footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
      footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
      area = footprint[0] * footprint[1];

/* if last cell */
      if (cells[id].next == 0) {

/* if area is minimum, update global values (double-checked under critical) */
        if (area < MIN_AREA) {
#pragma omp critical
          if (area < MIN_AREA) {
            MIN_AREA = area;
            MIN_FOOTPRINT[0] = footprint[0];
            MIN_FOOTPRINT[1] = footprint[1];
            memcpy(BEST_BOARD, board, sizeof(ibrd));
            bots_debug("N %d\n", MIN_AREA);
          }
        }

/* if area is less than best area, recurse (tasked or serial past cutoff) */
      } else if (area < MIN_AREA) {
        if(level+1 < bots_cutoff_value ) {
#pragma omp atomic
          nnc += add_cell(cells[id].next, footprint, board,cells,level+1);
        } else {
#pragma omp atomic
          nnc += add_cell_ser(cells[id].next, footprint, board,cells);
        }
/* if area is greater than or equal to best area, prune search */
      } else {
        bots_debug("T %d, %d\n", area, MIN_AREA);
      }
_end:;
    }
  }
}
/* Wait for all child tasks so the shared counter 'nnc' is final */
#pragma omp taskwait
  return nnc+nnl;
}
#else
/* Task-parallel floorplan search (default variant, no cutoff): one OpenMP
 * task is created per candidate placement at every recursion level.  Each
 * task works on a private VLA copy of the cell array and a private board;
 * the shared counter 'nnc' is updated atomically and the global minimum is
 * updated under a critical section.  Returns the number of search-tree
 * nodes visited. */
static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS) {
  int i, j, nn, area, nnc,nnl;

  ibrd board;
  coor footprint, NWS[DMAX];

  nnc = nnl = 0;

/* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
/* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nnl += nn;
/* for all possible locations, spawn one task per placement */
    for (j = 0; j < nn; j++) {
#pragma omp task private(board, footprint,area) \
firstprivate(NWS,i,j,id,nn) \
shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,nnc,bots_verbose_mode)
{
      /* Per-task private copy of the cell array (VLA of N+1 cells) */
      struct cell cells[N+1];
      memcpy(cells,CELLS,sizeof(struct cell)*(N+1));
/* extent of shape */
      cells[id].top = NWS[j][0];
      cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
      cells[id].lhs = NWS[j][1];
      cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;

      memcpy(board, BOARD, sizeof(ibrd));

/* if the cell cannot be layed down, prune search */
      if (! lay_down(id, board, cells)) {
        bots_debug("Chip %d, shape %d does not fit\n", id, i);
        goto _end;
      }

/* calculate new footprint of board and area of footprint */
      footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
      footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
      area = footprint[0] * footprint[1];

/* if last cell */
      if (cells[id].next == 0) {

/* if area is minimum, update global values (double-checked under critical) */
        if (area < MIN_AREA) {
#pragma omp critical
          if (area < MIN_AREA) {
            MIN_AREA = area;
            MIN_FOOTPRINT[0] = footprint[0];
            MIN_FOOTPRINT[1] = footprint[1];
            memcpy(BEST_BOARD, board, sizeof(ibrd));
            bots_debug("N %d\n", MIN_AREA);
          }
        }

/* if area is less than best area, recurse on the next cell */
      } else if (area < MIN_AREA) {
#pragma omp atomic
        nnc += add_cell(cells[id].next, footprint, board,cells);
/* if area is greater than or equal to best area, prune search */
      } else {
        bots_debug("T %d, %d\n", area, MIN_AREA);
      }
_end:;
    }
  }
}
/* Wait for all child tasks so the shared counter 'nnc' is final */
#pragma omp taskwait
  return nnc+nnl;
}
#endif
ibrd board;
/* Open 'filename', read the problem instance and initialize the global
 * search state: MIN_AREA starts at the whole-board area (worst case) and
 * the shared starting board is cleared.  Exits the process if the file
 * cannot be opened. */
void floorplan_init (char *filename)
{
    int i,j;

    inputFile = fopen(filename, "r");

    if(NULL == inputFile) {
        bots_message("Couldn't open %s file for reading\n", filename);
        exit(1);
    }

    /* read input file and initialize global minimum area */
    read_inputs();
    MIN_AREA = ROWS * COLS;

    /* initialize board is empty */
    for (i = 0; i < ROWS; i++)
    for (j = 0; j < COLS; j++) board[i][j] = 0;
}
/* Run the parallel floorplan search from cell 1 on the empty board.
 * A single thread issues the root call; the OpenMP tasks spawned inside
 * add_cell() provide the parallelism.  The cutoff variants take the
 * current recursion depth as an extra trailing argument. */
void compute_floorplan (void)
{
    coor footprint;
    /* footprint of initial board is zero */
    footprint[0] = 0;
    footprint[1] = 0;
    bots_message("Computing floorplan ");
#pragma omp parallel
{
#pragma omp single
#if defined(MANUAL_CUTOFF) || defined(IF_CUTOFF) || defined(FINAL_CUTOFF)
    bots_number_of_tasks = add_cell(1, footprint, board, gcells,0);
#else
    bots_number_of_tasks = add_cell(1, footprint, board, gcells);
#endif
}
    bots_message(" completed!\n");
}
/* Report the best floorplan found (minimum area and board layout). */
void floorplan_end (void)
{
    /* write results */
    write_outputs();
}
/* Compare the computed minimum area against the reference solution read
   from the input file; report N/A when no reference was provided
   (solution stays at its -1 sentinel). */
int floorplan_verify (void)
{
    if (solution == -1)
        return BOTS_RESULT_NA;
    return (MIN_AREA == solution) ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL;
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Integer factorial n! computed iteratively; fact(0) == fact(1) == 1.
   As with any size_t factorial, the result overflows for large n. */
static inline size_t fact(size_t n)
{
  size_t
    product,
    term;

  product=1;
  for (term=2; term <= n; term++)
    product*=term;
  return(product);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
/* Walk a kernel list and return its last (tail) entry.
** Assumes 'kernel' is non-NULL, as at all current call sites. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *last;

  for (last=kernel; last->next != (KernelInfo *) NULL; last=last->next)
    ;
  return(last);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
/* Parse a user-defined kernel array string (either the new "WxH[+X+Y]:v,v,.."
** form or the old odd-square list-of-numbers form) into a KernelInfo.
** Returns NULL on allocation failure or malformed input; a NULL input
** string yields a valid empty user-defined kernel.
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;

  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  register ssize_t
    i;

  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */

  MagickStatusType
    flags;

  GeometryInfo
    args;

  /* Allocate and zero-initialize the kernel structure. */
  kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel));
  if (kernel == (KernelInfo *) NULL)
    return(kernel);
  (void) memset(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature=MagickCoreSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);

  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');

  /* clear flags - for Expanding kernel lists through rotations */
  flags = NoValue;

  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
   */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* BUG FIX: a geometry prefix longer than the token buffer would
      ** previously overflow 'token'; reject such malformed input instead.
      */
      if ((size_t) (p-kernel_string) >= (MagickPathExtent-1))
        return(DestroyKernelInfo(kernel));
      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      memcpy(token, kernel_string, (size_t) (p-kernel_string));
      token[p-kernel_string] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);

      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then  width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 1.0;                /* then  width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then  height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;

      /* Offset Handling and Checks: negative offsets are invalid */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
        : (ssize_t) (kernel->height-1)/2;
      /* the origin must lie inside the kernel */
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));

      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }

  /* Read in the kernel values from rest of input string argument */
  kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum=MagickMaximumValue;
  kernel->maximum=(-MagickMaximumValue);
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    (void) GetNextToken(p,&p,MagickPathExtent,token);
    if (*token == ',')
      (void) GetNextToken(p,&p,MagickPathExtent,token);
    if ( LocaleCompare("nan",token) == 0
        || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* this value is not part of neighbourhood */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      /* accumulate the split positive/negative ranges, and track extrema */
      ( kernel->values[i] < 0)
          ?  ( kernel->negative_range += kernel->values[i] )
          :  ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }

  /* sanity check -- no more values in kernel definition */
  (void) GetNextToken(p,&p,MagickPathExtent,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));

#if 0
  /* this was the old method of handling a incomplete kernel */
  if ( i < (ssize_t) (kernel->width*kernel->height) ) {
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
    for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
      kernel->values[i]=0.0;
  }
#else
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
#endif

  /* check that we received at least one real (non-nan) value! */
  if (kernel->minimum == MagickMaximumValue)
    return(DestroyKernelInfo(kernel));

  if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);       /* 90 degree mirror rotate */

  return(kernel);
}
/* Parse a 'named' built-in kernel specification (e.g. "Gaussian:0x2"),
** apply per-kernel-type defaults for any geometry arguments not supplied,
** and hand off to AcquireKernelBuiltIn().  Returns NULL when the name is
** not a recognized built-in kernel or when the input is malformed.
*/
static KernelInfo *ParseKernelName(const char *kernel_string,
  ExceptionInfo *exception)
{
  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  (void) GetNextToken(kernel_string,&p,MagickPathExtent,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL); /* not a valid named kernel */

  /* skip separators between the name and its arguments */
  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';'); /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* BUG FIX: an argument string longer than the token buffer would
  ** previously overflow 'token'; reject such malformed input instead.
  */
  if ((size_t) (end-p) >= (MagickPathExtent-1))
    return((KernelInfo *) NULL);

  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  memcpy(token, p, (size_t) (end-p));
  token[end-p] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;     /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:  /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then  width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 3;                  /* then  width = 3 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then  height = width */
      if ( (flags & XValue) == 0 )     /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }

  return(kernel);
}
/* Convert a kernel-definition string (or "@filename" indirection) into a
** linked list of one or more KernelInfo structures.  Each ';'-separated
** sub-string is parsed either as a named built-in kernel (leading alpha
** character) or as a user-defined kernel array.  On any parse failure the
** partially built list is destroyed and NULL is returned.
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
  ExceptionInfo *exception)
{
  KernelInfo
    *kernel,      /* head of the kernel list being built */
    *new_kernel;  /* kernel parsed from the current sub-string */

  char
    *kernel_cache,  /* file contents when kernel_string is "@filename" */
    token[MagickPathExtent];

  const char
    *p;

  /* A NULL string still yields a valid (empty, user-defined) kernel. */
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@' prefix: read the actual kernel definition from the named file */
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  /* peek at the next token; stop when the definition string is exhausted */
  while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p,exception);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);
        if (new_kernel == (KernelInfo *) NULL)
          {
            /* parse failure: free everything built so far, including the
            ** file cache (BUG FIX: 'kernel_cache' previously leaked here) */
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            if (kernel_cache != (char *) NULL)
              kernel_cache=DestroyString(kernel_cache);
            return((KernelInfo *) NULL);
          }
        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }
    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returned one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% sever clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alturnative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2-dimensional Pascal's Triangle
% of values. Used for special forma of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Lapacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0,-2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neigher paper defineds the kernels in a way that looks locical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximataly 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeletion)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-conneected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeletion)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Tradional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a ressearch paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve conectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One why
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleving of Manhatten and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flys' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without loosing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while weird it is preferred.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging.
This is equivalent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Because of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less complex
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original.  The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */
  /* Clear the shallow-copied 'next' pointer before any fallible allocation.
  ** Without this, a failed 'values' allocation below would hand a new_kernel
  ** whose 'next' still aliases the ORIGINAL kernel's tail list to
  ** DestroyKernelInfo(), which would recursively free the original's tail
  ** and leave the caller's list with a dangling pointer.
  */
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
  if (new_kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  /* Free the given kernel and every kernel chained after it via 'next',
  ** releasing both the aligned value arrays and the structures themselves.
  ** Always returns NULL so callers can write: kernel=DestroyKernelInfo(kernel);
  */
  assert(kernel != (KernelInfo *) NULL);
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next_kernel;

    next_kernel=kernel->next;
    kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
    (void) RelinquishMagickMemory(kernel);
    kernel=next_kernel;
  }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/*
  Dead code: disabled with "#if 0".  Performs a horizontal "flop" by
  reversing each row of the kernel in place and mirroring the origin x.
  NOTE(review): the final statement references an undeclared variable
  'angle', so this function will not compile if re-enabled as-is.
*/
static void FlopKernelInfo(KernelInfo *kernel)
    { /* Do a Flop by reversing each row. */
      size_t
        y;
      register ssize_t
        x,r;
      register double
        *k,t;
      /* swap values symmetrically about the middle of each row */
      for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
        for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
          t=k[x], k[x]=k[r], k[r]=t;
      kernel->x = kernel->width - kernel->x - 1;
      angle = fmod(angle+180.0, 360.0);
    }
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /*
    Expand a single kernel into a 4-kernel list using the special mirror
    sequence: flip (180), transpose (90), flop (180).  Each new kernel is
    cloned from the previous one and appended to the end of the list.  If
    any clone fails the expansion simply stops early.
  */
  static const double
    angles[3] = { 180.0, 90.0, 180.0 };  /* flip, transpose, flop */

  KernelInfo
    *next,
    *tail;

  register ssize_t
    i;

  tail=kernel;
  for (i=0; i < 3; i++)
  {
    next=CloneKernelInfo(tail);
    if (next == (KernelInfo *) NULL)
      return;
    RotateKernelInfo(next,angles[i]);
    LastKernelInfo(tail)->next=next;
    tail=next;
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels.
% While 90 degree rotations only work for linear and square kernels
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  /*
    Return MagickTrue when the two kernels have identical geometry, origin,
    and values.  NaN entries are treated as equivalent to each other, and
    numeric values are compared to within MagickEpsilon.
  */
  register size_t
    i;

  /* geometry and origin must match exactly */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) ||
      (kernel1->y != kernel2->y))
    return(MagickFalse);
  for (i=0; i < (kernel1->width*kernel1->height); i++)
  {
    /* both NaN: equivalent "don't care" entries */
    if (IsNaN(kernel1->values[i]) && IsNaN(kernel2->values[i]))
      continue;
    /* exactly one NaN: kernels differ */
    if (IsNaN(kernel1->values[i]) || IsNaN(kernel2->values[i]))
      return(MagickFalse);
    /* numeric entries must agree to within epsilon */
    if (fabs(kernel1->values[i]-kernel2->values[i]) >= MagickEpsilon)
      return(MagickFalse);
  }
  return(MagickTrue);
}
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  /*
    Expand a kernel into a list of rotated kernels, rotating by 'angle'
    degrees each step, until the rotation reproduces the original kernel.
    The duplicate (repeated) clone is destroyed rather than appended.
  */
  KernelInfo
    *rotated,
    *tail;

  tail=kernel;
  for ( ; ; )
  {
    rotated=CloneKernelInfo(tail);
    if (rotated == (KernelInfo *) NULL)
      return;  /* allocation failed; leave the list as built so far */
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;  /* full circle reached; this clone is a repeat */
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  rotated=DestroyKernelInfo(rotated);  /* kernel repeated - junk */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+     C a l c K e r n e l M e t a D a t a                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculate the KernelInfo meta-data of this kernel only,
% using the kernel values. This should only be used if it is not possible to
% calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  /*
    Recompute the kernel's meta-data (minimum, maximum, negative_range,
    positive_range) directly from its values.  Values smaller in magnitude
    than MagickEpsilon are first snapped to exactly zero.  Minimum/maximum
    start at 0.0, so zero is always included in the reported range.
  */
  register size_t
    i;

  kernel->minimum=kernel->maximum=0.0;
  kernel->negative_range=kernel->positive_range=0.0;
  for (i=0; i < (kernel->width*kernel->height); i++)
  {
    /* flush denormal-ish noise to zero */
    if (fabs(kernel->values[i]) < MagickEpsilon)
      kernel->values[i]=0.0;
    /* accumulate signed ranges separately */
    if (kernel->values[i] < 0)
      kernel->negative_range+=kernel->values[i];
    else
      kernel->positive_range+=kernel->values[i];
    Minimize(kernel->minimum,kernel->values[i]);
    Maximize(kernel->maximum,kernel->values[i]);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to MorphologyImage() (see below) but without
% any user controls. This allows internal programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine. The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphologyPrimitive() applies one low-level morphology primitive (convolve,
  erode, dilate, hit-and-miss, etc.) to 'image', writing the result into the
  pre-allocated 'morphology_image'.  Returns the number of pixels that
  changed.
  NOTE(review): the two exit points signal failure differently -- the
  vertical-kernel fast path returns 0 on failure while the general path
  returns -1; confirm callers treat both as failure.
*/
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
  const MorphologyMethod method,const KernelInfo *kernel,const double bias,
  ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"

  CacheView
    *image_view,
    *morphology_view;

  OffsetInfo
    offset;  /* kernel origin offset (reflected for some methods) */

  register ssize_t
    j,
    y;

  size_t
    *changes,  /* one change counter per OpenMP thread */
    changed,
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(morphology_image != (Image *) NULL);
  assert(morphology_image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
  /* each row read must include the kernel's full horizontal support */
  width=image->columns+kernel->width-1;
  offset.x=0;
  offset.y=0;
  switch (method)
  {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
    {
      /*
        Kernel needs to be used with reflection about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
    default:
    {
      /* should never happen: caller dispatched a non-primitive method */
      assert("Not a Primitive Morphology Method" != (char *) NULL);
      break;
    }
  }
  changed=0;
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changes[j]=0;
  if ((method == ConvolveMorphology) && (kernel->width == 1))
    {
      register ssize_t
        x;

      /*
        Special handling (for speed) of vertical (blur) kernels.  This
        performs its handling in columns rather than in rows.  This is only
        done for convolve as it is the only method that generates very large
        1-D vertical kernels (such as a 'BlurKernel')
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,morphology_image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          r;

        ssize_t
          center;

        if (status == MagickFalse)
          continue;
        /* read one column, tall enough for the kernel's vertical support */
        p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
          kernel->height-1,exception);
        q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
          morphology_image->rows,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        /* 'center' indexes the pixel sitting under the kernel origin */
        center=(ssize_t) GetPixelChannels(image)*offset.y;
        for (r=0; r < (ssize_t) image->rows; r++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              alpha,
              gamma,
              pixel;

            PixelChannel
              channel;

            PixelTrait
              morphology_traits,
              traits;

            register const MagickRealType
              *magick_restrict k;

            register const Quantum
              *magick_restrict pixels;

            register ssize_t
              v;

            size_t
              count;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            morphology_traits=GetPixelChannelTraits(morphology_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morphology_traits == UndefinedPixelTrait))
              continue;
            if ((traits & CopyPixelTrait) != 0)
              {
                /* channel is not processed; copy it through unchanged */
                SetPixelChannel(morphology_image,channel,p[center+i],q);
                continue;
              }
            /* walk the reflected 1-D kernel down the column */
            k=(&kernel->values[kernel->height-1]);
            pixels=p;
            pixel=bias;
            gamma=1.0;
            count=0;
            if (((image->alpha_trait & BlendPixelTrait) == 0) ||
                ((morphology_traits & BlendPixelTrait) == 0))
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                if (!IsNaN(*k))
                  {
                    pixel+=(*k)*pixels[i];
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
            else
              {
                /* alpha-weighted convolution: normalize by summed alpha */
                gamma=0.0;
                for (v=0; v < (ssize_t) kernel->height; v++)
                {
                  if (!IsNaN(*k))
                    {
                      alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                      pixel+=alpha*(*k)*pixels[i];
                      gamma+=alpha*(*k);
                      count++;
                    }
                  k--;
                  pixels+=GetPixelChannels(image);
                }
              }
            if (fabs(pixel-p[center+i]) > MagickEpsilon)
              changes[id]++;
            gamma=PerceptibleReciprocal(gamma);
            if (count != 0)
              gamma*=(double) kernel->height/count;  /* NaN-entry compensation */
            SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
              pixel),q);
          }
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(morphology_image);
        }
        if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      morphology_image->type=image->type;
      morphology_view=DestroyCacheView(morphology_view);
      image_view=DestroyCacheView(image_view);
      /* accumulate per-thread change counts */
      for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
        changed+=changes[j];
      changes=(size_t *) RelinquishMagickMemory(changes);
      return(status ? (ssize_t) changed : 0);
    }
  /*
    Normal handling of horizontal or rectangular kernels (row by row).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,morphology_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /* read a band of rows covering the kernel's vertical support */
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* 'center' indexes the pixel sitting under the kernel origin */
    center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
      GetPixelChannels(image)*offset.x);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          intensity,
          maximum,
          minimum,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          morphology_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels,
          *magick_restrict quantum_pixels;

        register ssize_t
          u;

        size_t
          count;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        morphology_traits=GetPixelChannelTraits(morphology_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (morphology_traits == UndefinedPixelTrait))
          continue;
        if ((traits & CopyPixelTrait) != 0)
          {
            /* channel is not processed; copy it through unchanged */
            SetPixelChannel(morphology_image,channel,p[center+i],q);
            continue;
          }
        pixels=p;
        quantum_pixels=(const Quantum *) NULL;
        maximum=0.0;
        minimum=(double) QuantumRange;
        /* initialize the accumulator appropriately for the method */
        switch (method)
        {
          case ConvolveMorphology:
          {
            pixel=bias;
            break;
          }
          case DilateMorphology:
          case ErodeIntensityMorphology:
          {
            pixel=0.0;
            break;
          }
          case HitAndMissMorphology:
          case ErodeMorphology:
          {
            pixel=QuantumRange;
            break;
          }
          default:
          {
            pixel=(double) p[center+i];
            break;
          }
        }
        count=0;
        gamma=1.0;
        switch (method)
        {
          case ConvolveMorphology:
          {
            /*
              Weighted Average of pixels using reflected kernel
              For correct working of this operation for asymmetrical kernels,
              the kernel needs to be applied in its reflected form. That is
              its values needs to be reversed.
              Correlation is actually the same as this but without reflecting
              the kernel, and thus 'lower-level' than Convolution. However as
              Convolution is the more common method used, and it does not
              really cost us much in terms of processing to use a reflected
              kernel, so it is Convolution that is implemented.
              Correlation will have its kernel reflected before calling this
              function to do a Convolve.
              For more details of Correlation vs Convolution see
              http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            if (((image->alpha_trait & BlendPixelTrait) == 0) ||
                ((morphology_traits & BlendPixelTrait) == 0))
              {
                /*
                  No alpha blending.
                */
                for (v=0; v < (ssize_t) kernel->height; v++)
                {
                  for (u=0; u < (ssize_t) kernel->width; u++)
                  {
                    if (!IsNaN(*k))
                      {
                        pixel+=(*k)*pixels[i];
                        count++;
                      }
                    k--;
                    pixels+=GetPixelChannels(image);
                  }
                  /* advance to the start of the next kernel row */
                  pixels+=(image->columns-1)*GetPixelChannels(image);
                }
                break;
              }
            /*
              Alpha blending.
            */
            gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                    pixel+=alpha*(*k)*pixels[i];
                    gamma+=alpha*(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case ErodeMorphology:
          {
            /*
              Minimum value within kernel neighbourhood.
              The kernel is not reflected for this operation. In normal
              Greyscale Morphology, the kernel value should be added
              to the real value, this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    if ((double) pixels[i] < pixel)
                      pixel=(double) pixels[i];
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateMorphology:
          {
            /*
              Maximum value within kernel neighbourhood.
              For correct working of this operation for asymmetrical kernels,
              the kernel needs to be applied in its reflected form. That is
              its values needs to be reversed.
              In normal Greyscale Morphology, the kernel value should be
              added to the real value, this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k > 0.5))
                  {
                    if ((double) pixels[i] > pixel)
                      pixel=(double) pixels[i];
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case HitAndMissMorphology:
          case ThinningMorphology:
          case ThickenMorphology:
          {
            /*
              Minimum of foreground pixel minus maximum of background pixels.
              The kernel is not reflected for this operation, and consists
              of both foreground and background pixel neighbourhoods, 0.0 for
              background, and 1.0 for foreground with either Nan or 0.5 values
              for don't care.
              This never produces a meaningless negative result. Such results
              cause Thinning/Thicken to not work correctly when used against a
              greyscale image.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if (*k > 0.7)
                      {
                        /* foreground entry: track minimum */
                        if ((double) pixels[i] < pixel)
                          pixel=(double) pixels[i];
                      }
                    else
                      if (*k < 0.3)
                        {
                          /* background entry: track maximum */
                          if ((double) pixels[i] > maximum)
                            maximum=(double) pixels[i];
                        }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            pixel-=maximum;
            if (pixel < 0.0)
              pixel=0.0;
            if (method ==  ThinningMorphology)
              pixel=(double) p[center+i]-pixel;
            else
              if (method ==  ThickenMorphology)
                /* NOTE(review): '+=' adds 'pixel' twice (p + 2*pixel); the
                   Thinning branch above uses '='.  Verify this asymmetry is
                   intentional. */
                pixel+=(double) p[center+i]+pixel;
            break;
          }
          case ErodeIntensityMorphology:
          {
            /*
              Select pixel with minimum intensity within kernel neighbourhood.
              The kernel is not reflected for this operation.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity < minimum)
                      {
                        /* remember whole pixel so all channels can be set */
                        quantum_pixels=pixels;
                        pixel=(double) pixels[i];
                        minimum=intensity;
                      }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateIntensityMorphology:
          {
            /*
              Select pixel with maximum intensity within kernel neighbourhood.
              The kernel is not reflected for this operation.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity > maximum)
                      {
                        pixel=(double) pixels[i];
                        quantum_pixels=pixels;
                        maximum=intensity;
                      }
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case IterativeDistanceMorphology:
          {
            /*
              Compute the iterative distance from black edge of a white image
              shape. Essentially white values are decreased to the smallest
              'distance from edge' it can find.
              It works by adding kernel values to the neighbourhood, and
              select the minimum value found. The kernel is rotated before
              use, so kernel distances match resulting distances, when a user
              provided asymmetric kernel is applied.
              This code is nearly identical to True GrayScale Morphology but
              not quite.
              GreyDilate Kernel values added, maximum value found Kernel is
              rotated before use.
              GrayErode: Kernel values subtracted and minimum value found No
              kernel rotation used.
              Note the Iterative Distance method is essentially a
              GrayErode, but with negative kernel values, and kernel rotation
              applied.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case UndefinedMorphology:
          default:
            break;
        }
        if (fabs(pixel-p[center+i]) > MagickEpsilon)
          changes[id]++;
        if (quantum_pixels != (const Quantum *) NULL)
          {
            /* intensity methods copy the selected pixel's channel directly */
            SetPixelChannel(morphology_image,channel,quantum_pixels[i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        if (count != 0)
          gamma*=(double) kernel->height*kernel->width/count;  /* NaN comp. */
        SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(morphology_image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /* accumulate per-thread change counts */
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changed+=changes[j];
  changes=(size_t *) RelinquishMagickMemory(changes);
  return(status ? (ssize_t) changed : -1);
}
/*
This is almost identical to the MorphologyPrimitive() function above, but
applies the primitive directly to the actual image using two passes, once in
each direction, with the results of the previous (and current) row being
re-used.
That is, after each row is 'Sync'ed' back into the image, the next row makes
use of those values as part of the calculation of the next row. It repeats,
but going in the opposite (bottom-up) direction.
Because of this 're-use of results' this function can not make use of
multi-threaded, parallel processing.
*/
/*
  MorphologyPrimitiveDirect() applies a distance-style primitive (Distance,
  Voronoi) directly to 'image' in place, using two sequential passes
  (top-down then bottom-up) that each re-use the results of previously
  processed rows/pixels.  Because of that re-use it must NOT be threaded.
  Returns the number of pixels changed, or -1 on failure.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  CacheView
    *morphology_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;  /* kernel origin offset (reflected for these methods) */

  size_t
    width,
    changed;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  changed=0;
  progress=0;
  switch(method)
  {
    case DistanceMorphology:
    case VoronoiMorphology:
    {
      /*
        Kernel reflected about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    default:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
  }
  /*
    Two views into same image, do not thread.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  /* each row read must include the kernel's full horizontal support */
  width=image->columns+kernel->width-1;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image! We read
      using virtual to get virtual pixel handling, but write back into the same
      image.
      Only top half of kernel is processed as we do a single pass downward
      through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
      offset.y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* rows above the current row (virtual/previous results) */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v <= offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* pixels to the left on the current row (already updated in q) */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* NOTE(review): this loop uses 'v < offset.y' where the
               Distance case above uses 'v <= offset.y' -- confirm the
               off-by-one difference is intentional. */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /*
    Do the reverse pass through the image.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  for (y=(ssize_t) image->rows-1; y >= 0; y--)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image. We
      read using virtual to get virtual pixel handling, but write back
      into the same image.
      Only the bottom half of the kernel is processed as we go up the image.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
      kernel->y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* start at the right-most pixel and scan leftwards */
    p+=(image->columns-1)*GetPixelChannels(image);
    q+=(image->columns-1)*GetPixelChannels(image);
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* rows below the current row (virtual/previous results) */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* pixels to the right on the current row (already updated) */
            k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          case VoronoiMorphology:
          {
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* NOTE(review): this starting index differs from the Distance
               case's (kernel->width*kernel->y+kernel->x-1) -- confirm
               intentional. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  return(status ? (ssize_t) changed : -1);
}
/*
Apply a Morphology by calling one of the above low level primitive
application functions. This function handles any iteration loops,
composition or re-iteration of results, and compound morphology methods that
are based on multiple low-level (staged) morphology methods.
Basically this provides the complex glue between the requested morphology
method and raw low-level implementation (above).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /*         maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /*         how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /*         number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALL THROUGH */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;

      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      /* fixed: single (void) cast -- the original had a duplicated
         "(void) (void)" which was harmless but a clear typo */
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  **  erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1:  iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2:  iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:     /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2: /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3: /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4: /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:       /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;     /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
             CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
             method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
             CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
             method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;     /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
                       this_kernel, bias, exception);
          /* fixed: single (void) cast -- removed duplicated "(void) (void)" */
          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, "   Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, "      curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, "      work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, "      save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, "      union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /*  Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImage() applies a user supplied kernel to the image according to
% the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
% This can also includes the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showKernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showKernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  CompositeOperator
    compose;

  const char
    *artifact;

  double
    bias;

  Image
    *morphology_image;

  KernelInfo
    *work_kernel;

  ssize_t
    parse;

  /* Start with the caller's kernel; it is cloned only if it must be
  ** modified (so the caller's copy is never touched).
  */
  work_kernel=(KernelInfo *) kernel;
  bias=0.0;
  compose=UndefinedCompositeOp;  /* use default for method */
  /* Apply Convolve/Correlate Normalization and Scaling Factors.
  ** This is done BEFORE the ShowKernelInfo() function is called so that
  ** users can see the results of the 'option:convolve:scale' option.
  */
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      /* Get the bias value as it will be needed */
      artifact=GetImageArtifact(image,"convolve:bias");
      if (artifact != (const char *) NULL)
        {
          if (IsGeometry(artifact) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'",
              "convolve:bias",artifact);
          else
            bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
        }
      /* Scale kernel according to user wishes */
      artifact=GetImageArtifact(image,"convolve:scale");
      if (artifact != (const char *) NULL)
        {
          if (IsGeometry(artifact) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'",
              "convolve:scale",artifact);
          else
            {
              if (work_kernel == kernel)
                work_kernel=CloneKernelInfo(kernel);
              if (work_kernel == (KernelInfo *) NULL)
                return((Image *) NULL);
              ScaleGeometryKernelInfo(work_kernel,artifact);
            }
        }
    }
  /* display the (normalized) kernel via stderr */
  artifact=GetImageArtifact(image,"morphology:showKernel");
  if (IsStringTrue(artifact) != MagickFalse)
    ShowKernelInfo(work_kernel);
  /* Override the default handling of multi-kernel morphology results
  **   If 'Undefined' use the default method
  **   If 'None' (default for 'Convolve') re-iterate previous result
  **   Otherwise merge resulting images using compose method given.
  ** Default for 'HitAndMiss' is 'Lighten'.
  */
  artifact=GetImageArtifact(image,"morphology:compose");
  if (artifact != (const char *) NULL)
    {
      parse=ParseCommandOption(MagickComposeOptions,MagickFalse,artifact);
      if (parse < 0)
        (void) ThrowMagickException(exception,GetMagickModule(),
          OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
          "morphology:compose",artifact);
      else
        compose=(CompositeOperator) parse;
    }
  /* Apply the Morphology, then release any cloned kernel. */
  morphology_image=MorphologyApply(image,method,iterations,work_kernel,
    compose,bias,exception);
  if (work_kernel != kernel)
    work_kernel=DestroyKernelInfo(work_kernel);
  return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first (recurse down the multi-kernel list) */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle into the [0, 360) range */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases: skip rotations that cannot change the kernel */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }
  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle
          ** (cyclic shift of the 8 ring values around the center cell)
          */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin
          ** (step the origin one position around the same ring)
          */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            /* map origin offset through one 45-degree step */
            if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
        /* NOTE(review): perror() is used here as a plain warning; errno is
        ** not meaningful at this point -- confirm intended behaviour */
    }
  /* Attempt a 90-degree rotation (transpose of a 1D kernel, or an in-place
  ** rotation of a square kernel) */
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees */
          { register ssize_t
              i,j,x,y;

            register MagickRealType
              *k,t;

            /* in-place 4-cycle rotation over shrinking concentric rings */
            k=kernel->values;
            for( i=0, x=(ssize_t) kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1;  j<y;   j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  /* Attempt a 180-degree rotation (reflection) */
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also know as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      MagickRealType
        t;

      register MagickRealType
        *k;

      ssize_t
        i,
        j;

      /* reverse the flat value array in place */
      k=kernel->values;
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0;  i < j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      /* reflect the origin through the kernel center */
      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);   /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, posibily with a linear kernel restriction.
   */

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
%     void ScaleGeometryKernelInfo(KernelInfo *kernel,
%       const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    args;

  MagickStatusType
    flags;

  /* Parse the user-supplied geometry string into numeric arguments. */
  SetGeometryInfo(&args);
  flags=ParseGeometry(geometry,&args);
  /* A '%' flag means both arguments were given as percentages. */
  if ((flags & PercentValue) != 0)
    {
      args.rho*=0.01;
      args.sigma*=0.01;
    }
  /* Supply defaults for any argument the user omitted. */
  if ((flags & RhoValue) == 0)
    args.rho=1.0;
  if ((flags & SigmaValue) == 0)
    args.sigma=0.0;
  /* Scale/Normalize the input kernel (first argument + '!'/'^' flags). */
  ScaleKernelInfo(kernel,args.rho,(GeometryFlags) flags);
  /* Second argument blends in a scaled unity kernel, if one was given. */
  if ((flags & SigmaValue) != 0)
    UnityAddKernelInfo(kernel,args.sigma);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) the
% kernel will be scaled by just the sum of the positive values, so that its
% output range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  register double
    pos_scale,
    neg_scale;

  register ssize_t
    i;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel: '!' flag divides by the kernel sum (or, for a
  ** zero-summing kernel, by the sum of just its positive values).
  */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel */
      pos_scale = kernel->positive_range;
  }
  /* Force kernel into a normalized zero-summing kernel: '^' flag scales the
  ** positive and negative components independently.
  */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                 ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                 ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  /* scale the values; NaN entries (shape kernels) are left untouched */
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if (!IsNaN(kernel->values[i]))
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;  /* BUG FIX: was '= 1', discarding the saved maximum
                             and leaving the swapped minimum wrong */
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a users 'morphology:showKernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
  const KernelInfo
    *curr;

  size_t
    col,
    idx,
    number,
    row;

  /* Walk the multi-kernel list, printing one report per kernel. */
  number=0;
  for (curr=kernel; curr != (KernelInfo *) NULL; curr=curr->next)
  {
    /* Header: type name, angle, geometry, and value range.  Note the
    ** kernel index is only shown when the HEAD of the list has siblings. */
    (void) FormatLocaleFile(stderr, "Kernel");
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) number );
    (void) FormatLocaleFile(stderr, " \"%s",
          CommandOptionToMnemonic(MagickKernelOptions, curr->type) );
    if ( fabs(curr->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", curr->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
      curr->width,(unsigned long) curr->height,(long) curr->x,(long) curr->y);
    (void) FormatLocaleFile(stderr,
          " with values from %.*lg to %.*lg\n",
          GetMagickPrecision(), curr->minimum,
          GetMagickPrecision(), curr->maximum);
    /* Output range summary: zero-summing, normalized, or the raw sum. */
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
          GetMagickPrecision(), curr->negative_range,
          GetMagickPrecision(), curr->positive_range);
    if ( fabs(curr->positive_range+curr->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(curr->positive_range+curr->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
          GetMagickPrecision(), curr->positive_range+curr->negative_range);
    /* Dump the kernel matrix row by row; NaN entries print as "nan". */
    idx=0;
    for (row=0; row < curr->height; row++)
    {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) row );
      for (col=0; col < curr->width; col++, idx++)
      {
        if (IsNaN(curr->values[idx]))
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
              GetMagickPrecision(), (double) curr->values[idx]);
      }
      (void) FormatLocaleFile(stderr,"\n");
    }
    number++;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *k;

  /*
    Add `scale` times the 'Unity' kernel (a single impulse at the kernel
    origin) to every kernel in the multi-kernel list, then refresh each
    kernel's meta-data.  Iterates the list directly instead of recursing;
    the per-kernel updates are independent of one another.
  */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
  {
    k->values[k->x+k->y*k->width]+=scale;
    CalcKernelMetaData(k);  /* recalculate ranges and extrema */
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *k;

  register size_t
    i;

  /*
    Replace every special 'nan' value with 0.0, in this kernel and in all
    kernels further along the multi-kernel list.  Written as list iteration
    in place of the recursive formulation; the kernels are independent.
  */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    for (i=0; i < (k->width*k->height); i++)
      if (IsNaN(k->values[i]))
        k->values[i]=0.0;
  return;
}
|
omp_atomic.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#define DOUBLE_DIGITS 20 /* dt^DOUBLE_DIGITS */
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800 /* 10! */
/*
 * Exercises every operator form of the OpenMP `atomic` construct
 * (+=, -=, *=, /=, ++, --, &=, |=, ^=, <<=, >>=) on int and double
 * accumulators, comparing each parallel result against a serially
 * computed reference.  Returns 1 when all sub-tests pass, 0 otherwise.
 *
 * Fixes: the integer-difference failure message previously printed
 * "instead of 0" even though the expected value is known_diff; the
 * unused locals logic_and/logic_or were removed.
 */
int test_omp_atomic()
{
  int sum;
  int diff;
  double dsum = 0;
  double dt = 0.5; /* base of geometric row for + and - test*/
  double ddiff;
  int product;
  int x;
  int *logics;
  int bit_and = 1;
  int bit_or = 0;
  int exclusiv_bit_or = 0;
  int j;
  int known_sum;
  int known_diff;
  int known_product;
  int result = 0;              /* number of failed sub-tests */
  double dknown_sum;
  double rounding_error = 1.E-9;
  double dpt, div;
  int logicsArray[LOOPCOUNT];
  logics = logicsArray;

  sum = 0;
  diff = 0;
  product = 1;

  // sum of integers test
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 1; i <= LOOPCOUNT; i++) {
      #pragma omp atomic
      sum += i;
    }
  }
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
  if (known_sum != sum)
  {
    fprintf(stderr,
            "Error in sum with integers: Result was %d instead of %d.\n",
            sum, known_sum);
    result++;
  }

  // difference of integers test
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; i++) {
      #pragma omp atomic
      diff -= i;
    }
  }
  known_diff = ((LOOPCOUNT - 1) * LOOPCOUNT) / 2 * -1;
  if (diff != known_diff)
  {
    /* fixed: previously claimed the expected value was 0 */
    fprintf (stderr,
             "Error in difference with integers: Result was %d instead of %d.\n",
             diff, known_diff);
    result++;
  }

  // sum of doubles test (geometric series in dt, tolerant of rounding)
  dsum = 0;
  dpt = 1;
  for (j = 0; j < DOUBLE_DIGITS; ++j) {
    dpt *= dt;
  }
  dknown_sum = (1 - dpt) / (1 -dt);
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < DOUBLE_DIGITS; ++i) {
      #pragma omp atomic
      dsum += pow (dt, i);
    }
  }
  if (dsum != dknown_sum && (fabs (dsum - dknown_sum) > rounding_error)) {
    fprintf (stderr, "Error in sum with doubles: Result was %f"
             " instead of: %f (Difference: %E)\n",
             dsum, dknown_sum, dsum - dknown_sum);
    result++;
  }

  // difference of doubles test
  dpt = 1;
  for (j = 0; j < DOUBLE_DIGITS; ++j) {
    dpt *= dt;
  }
  ddiff = (1 - dpt) / (1 - dt);
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < DOUBLE_DIGITS; ++i) {
      #pragma omp atomic
      ddiff -= pow (dt, i);
    }
  }
  if (fabs (ddiff) > rounding_error) {
    fprintf (stderr,
             "Error in difference with doubles: Result was %E instead of 0.0\n",
             ddiff);
    result++;
  }

  // product of integers test
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 1; i <= MAX_FACTOR; i++) {
      #pragma omp atomic
      product *= i;
    }
  }
  known_product = KNOWN_PRODUCT;
  if (known_product != product) {
    fprintf (stderr,
             "Error in product with integers: Result was %d instead of %d\n",
             product, known_product);
    result++;
  }

  // division of integers test: 10! divided by 1..10 must return to 1
  product = KNOWN_PRODUCT;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 1; i <= MAX_FACTOR; ++i) {
      #pragma omp atomic
      product /= i;
    }
  }
  if (product != 1) {
    fprintf (stderr,
             "Error in product division with integers: Result was %d"
             " instead of 1\n",
             product);
    result++;
  }

  // division of doubles test: 5e5 / 10! ~= 0.137787
  div = 5.0E+5;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 1; i <= MAX_FACTOR; i++) {
      #pragma omp atomic
      div /= i;
    }
  }
  if (fabs(div-0.137787) >= 1.0E-4 ) {
    result++;
    fprintf (stderr, "Error in division with double: Result was %f"
             " instead of 0.137787\n", div);
  }

  // ++ test
  x = 0;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      x++;
    }
  }
  if (x != LOOPCOUNT) {
    result++;
    fprintf (stderr, "Error in ++\n");
  }

  // -- test
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      x--;
    }
  }
  if (x != 0) {
    result++;
    fprintf (stderr, "Error in --\n");
  }

  // bit-and test part 1: all ones -> result must stay 1
  for (j = 0; j < LOOPCOUNT; ++j) {
    logics[j] = 1;
  }
  bit_and = 1;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      bit_and &= logics[i];
    }
  }
  if (!bit_and) {
    result++;
    fprintf (stderr, "Error in BIT AND part 1\n");
  }

  // bit-and test part 2: a single zero must clear the result
  bit_and = 1;
  logics[LOOPCOUNT / 2] = 0;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      bit_and &= logics[i];
    }
  }
  if (bit_and) {
    result++;
    fprintf (stderr, "Error in BIT AND part 2\n");
  }

  // bit-or test part 1: all zeros -> result must stay 0
  for (j = 0; j < LOOPCOUNT; j++) {
    logics[j] = 0;
  }
  bit_or = 0;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      bit_or |= logics[i];
    }
  }
  if (bit_or) {
    result++;
    fprintf (stderr, "Error in BIT OR part 1\n");
  }

  // bit-or test part 2: a single one must set the result
  bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      bit_or |= logics[i];
    }
  }
  if (!bit_or) {
    result++;
    fprintf (stderr, "Error in BIT OR part 2\n");
  }

  // bit-xor test part 1: all zeros -> result must stay 0
  for (j = 0; j < LOOPCOUNT; j++) {
    logics[j] = 0;
  }
  exclusiv_bit_or = 0;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      exclusiv_bit_or ^= logics[i];
    }
  }
  if (exclusiv_bit_or) {
    result++;
    fprintf (stderr, "Error in EXCLUSIV BIT OR part 1\n");
  }

  // bit-xor test part 2: exactly one 1 -> result must be 1
  exclusiv_bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i) {
      #pragma omp atomic
      exclusiv_bit_or ^= logics[i];
    }
  }
  if (!exclusiv_bit_or) {
    result++;
    fprintf (stderr, "Error in EXCLUSIV BIT OR part 2\n");
  }

  // left shift test: 1 << 10 == 1024
  x = 1;
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < 10; ++i) {
      #pragma omp atomic
      x <<= 1;
    }
  }
  if ( x != 1024) {
    result++;
    fprintf (stderr, "Error in <<\n");
    x = 1024;   /* re-seed so the >> test below still starts from 1024 */
  }

  // right shift test: 1024 >> 10 == 1
  #pragma omp parallel
  {
    int i;
    #pragma omp for
    for (i = 0; i < 10; ++i) {
      #pragma omp atomic
      x >>= 1;
    }
  }
  if (x != 1) {
    result++;
    fprintf (stderr, "Error in >>\n");
  }

  return (result == 0);
} // test_omp_atomic()
int main()
{
  int rep;
  int failures = 0;

  /* Run the atomic test REPETITIONS times and count the failing runs;
     the exit status is the failure count (0 == success). */
  for (rep = 0; rep < REPETITIONS; rep++)
    if (!test_omp_atomic())
      failures++;
  return failures;
}
|
colormap.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR M M AAA PPPP %
% C O O L O O R R MM MM A A P P %
% C O O L O O RRRR M M M AAAAA PPPP %
% C O O L O O R R M M A A P %
% CCCC OOO LLLLL OOO R R M M A A P %
% %
% %
% MagickCore Colormap Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We use linked-lists because splay-trees do not currently support duplicate
% key / value pairs (e.g. X11 green compliance and SVG green compliance).
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/client.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AcquireImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% The format of the AcquireImageColormap method is:
%
% MagickBooleanType AcquireImageColormap(Image *image,const size_t colors,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return the larger of two size_t values.
*/
static inline size_t MagickMax(const size_t x,
  const size_t y)
{
  return(x < y ? y : x);
}
/*
  Allocate (or re-size) the image colormap to hold max(colors,2) entries and
  initialize it to a linear gray ramp with opaque alpha.  Returns the result
  of switching the image to PseudoClass; on allocation failure the image is
  reset to DirectClass with no colormap and a ResourceLimitError is thrown.
*/
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
  const size_t colors,ExceptionInfo *exception)
{
  register ssize_t
    i;

  /*
    Allocate image colormap.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Guarantee at least 2 entries so the gray-ramp division below is safe. */
  image->colors=MagickMax(colors,2);
  if (image->colormap == (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,
      sizeof(*image->colormap));
  else
    image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
      image->colors,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    {
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    double
      pixel;

    /* NOTE(review): QuantumRange/(image->colors-1) is evaluated before the
       multiply; if QuantumRange is an integer type the truncation makes the
       gray ramp slightly non-uniform -- confirm whether
       i*QuantumRange/(colors-1) was intended. */
    pixel=(double) (i*(QuantumRange/(image->colors-1)));
    GetPixelInfo(image,image->colormap+i);
    image->colormap[i].alpha_trait=BlendPixelTrait;
    image->colormap[i].red=pixel;
    image->colormap[i].green=pixel;
    image->colormap[i].blue=pixel;
    image->colormap[i].alpha=OpaqueAlpha;
  }
  return(SetImageStorageClass(image,PseudoClass,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C y c l e C o l o r m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CycleColormap() displaces an image's colormap by a given number of
% positions. If you cycle the colormap a number of times you can produce
% a psychedelic effect.
%
% WARNING: this assumes an image's colormap is in a well-known and defined
% order. Currently ImageMagick has no way of setting that order.
%
% The format of the CycleColormapImage method is:
%
% MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o displace: displace the colormap this amount.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Rotate the image colormap by `displace` positions (positive or negative)
  and remap every pixel index accordingly.  DirectClass images are first
  converted to a palette.  Returns MagickTrue on success.

  Fix: the modulo is now taken against image->colors cast to ssize_t.  The
  previous `% image->colors` converted the signed sum to size_t (usual
  arithmetic conversions), so negative sums wrapped through a huge unsigned
  value and the `index < 0` correction branch was dead code, yielding wrong
  indices for negative displacements.
*/
MagickExport MagickBooleanType CycleColormapImage(Image *image,
  const ssize_t displace,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == DirectClass)
    (void) SetImageType(image,PaletteType,exception);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *restrict q;

    ssize_t
      index;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Signed modulo keeps index in (-colors, colors); fold negatives up. */
      index=((ssize_t) GetPixelIndex(image,q)+displace) % (ssize_t)
        image->colors;
      if (index < 0)
        index+=(ssize_t) image->colors;
      SetPixelIndex(image,(Quantum) index,q);
      SetPixelInfoPixel(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S o r t C o l o r m a p B y I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortColormapByIntensity() sorts the colormap of a PseudoClass image by
% decreasing color intensity.
%
% The format of the SortColormapByIntensity method is:
%
% MagickBooleanType SortColormapByIntensity(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: A pointer to an Image structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator: order PixelInfo colormap entries by DECREASING
  intensity.  Fix: compare as doubles and return only the sign -- the
  previous `(int) a - (int) b` form truncated fractional intensity
  differences to "equal" and could overflow int for large intensity
  values, violating the comparator contract.
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelInfo
    *color_1,
    *color_2;

  double
    intensity;

  color_1=(const PixelInfo *) x;
  color_2=(const PixelInfo *) y;
  intensity=GetPixelInfoIntensity(color_2)-GetPixelInfoIntensity(color_1);
  if (intensity < 0.0)
    return(-1);
  if (intensity > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Sort a PseudoClass image's colormap by decreasing intensity and rewrite
  every pixel index to match the new colormap order.  Non-PseudoClass
  images are returned unchanged (MagickTrue).
*/
MagickExport MagickBooleanType SortColormapByIntensity(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  unsigned short
    *pixels;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->storage_class != PseudoClass)
    return(MagickTrue);
  /*
    Allocate memory for pixel indexes.
  */
  pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
    sizeof(*pixels));
  if (pixels == (unsigned short *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Assign index values to colormap entries: the alpha channel is used as
    scratch space to remember each entry's original position across qsort().
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  /*
    Sort image colormap by decreasing intensity (see IntensityCompare).
  */
  qsort((void *) image->colormap,(size_t) image->colors,
    sizeof(*image->colormap),IntensityCompare);
  /*
    Build the old-index -> new-index mapping: pixels[old] is the sorted
    position of the entry that used to sit at `old`.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
    pixels[(ssize_t) image->colormap[i].alpha]=(unsigned short) i;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    register ssize_t
      x;

    register Quantum
      *restrict q;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Remap each pixel's palette index to the sorted order. */
      index=(Quantum) pixels[(ssize_t) GetPixelIndex(image,q)];
      SetPixelIndex(image,index,q);
      SetPixelInfoPixel(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (status == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  pixels=(unsigned short *) RelinquishMagickMemory(pixels);
  return(status);
}
|
THTensorMath.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/THTensorMath.c"
#else
#define TH_OMP_OVERHEAD_THRESHOLD 100000
/* Set every element of r_ to `value`.  The TH_TENSOR_APPLY traversal hands
   each contiguous run (r__data, r__size) to the vectorized fill; the
   trailing `break` skips the macro's per-element stepping for that run.
   NOTE(review): relies on TH_TENSOR_APPLY's internal loop structure
   (THTensorApply.h) -- confirm before touching. */
void THTensor_(fill)(THTensor *r_, real value)
{
  TH_TENSOR_APPLY(real, r_,
                  THVector_(fill)(r__data, value, r__size); break;);
}
/* Set every element of r_ to 0.  Each contiguous run is handled by the
   vectorized fill and the trailing `break` skips the macro's per-element
   stepping (same TH_TENSOR_APPLY-internals reliance as the fill above). */
void THTensor_(zero)(THTensor *r_)
{
  TH_TENSOR_APPLY(real, r_,
                  THVector_(fill)(r__data, 0, r__size); break;);
}
/* Where mask==1, set the corresponding element of `tensor` to `value`.
   mask entries other than 0 or 1 raise an error mid-traversal (elements
   visited earlier may already have been written at that point). */
void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value)
{
  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
                   if (*mask_data > 1) THError("Mask tensor can take 0 and 1 values only");
                   else if (*mask_data == 1) *tensor_data = value;);
}
/*
  Copy elements of `src` (read in contiguous order) into the positions of
  `tensor` selected by mask==1.  src must provide exactly as many elements
  as the mask selects; mask entries other than 0/1 raise an error.

  Fix: the consumed-element count is checked BEFORE dereferencing src_data,
  so a mask selecting more elements than src holds no longer reads one
  element past the end of src's storage before raising the error.
*/
void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src )
{
  THTensor *srct = THTensor_(newContiguous)(src); /* linear read cursor */
  real *src_data = THTensor_(data)(srct);
  long cntr = 0;                  /* elements of src consumed so far */
  long nelem = THTensor_(nElement)(srct);
  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
                   if (*mask_data > 1)
                   {
                     THError("Mask tensor can take 0 and 1 values only");
                   }
                   else if (*mask_data == 1)
                   {
                     if (cntr == nelem)
                       THError("Number of elements of src != mask");
                     *tensor_data = *src_data;
                     src_data++;
                     cntr++;
                   });
  if (cntr != nelem)
    THError("Number of elements of src != mask");
  THTensor_(free)(srct);
}
/*
  Gather the elements of `src` where mask==1 into `tensor`, which is
  resized to a 1-D tensor of length sumall(mask).  Elements are emitted in
  TH_TENSOR_APPLY2's traversal order over src/mask.  mask entries other
  than 0/1 raise an error.
*/
void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask)
{
  long numel = THByteTensor_sumall(mask);   /* number of selected elements */
  real *tensor_data;

  THTensor_(resize1d)(tensor,numel);
  tensor_data = THTensor_(data)(tensor);    /* 1-D result: advance linearly */
  TH_TENSOR_APPLY2(real, src, unsigned char, mask,
                   if (*mask_data > 1)
                   {
                     THError("Mask tensor can take 0 and 1 values only");
                   }
                   else if (*mask_data == 1)
                   {
                     *tensor_data = *src_data;
                     tensor_data++;
                   });
}
/*
  tensor = src gathered along dimension `dim` at the positions given by
  `index`, a 1-D LongTensor of 1-based indices (Lua convention: the code
  subtracts 1).  tensor is resized so size[dim] == #index while every
  other dimension matches src.
*/
void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index)
{
  long i, numel;
  THLongStorage *newSize;
  THTensor *tSlice, *sSlice;
  long *index_data;

  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < src->nDimension,4,"Indexing dim is out of bounds");
  THArgCheck(src->nDimension > 0,2,"Source tensor is empty");

  numel = THLongTensor_nElement(index);

  /* Result shape: same as src except size[dim] = number of indices. */
  newSize = THLongStorage_newWithSize(src->nDimension);
  THLongStorage_rawCopy(newSize,src->size);
  newSize->data[dim] = numel;
  THTensor_(resize)(tensor,newSize,NULL);
  THLongStorage_free(newSize);

  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);
  for (i=0; i<numel; i++)
  {
    if (src->nDimension > 1)
    {
      /* Copy the selected slice of src into slice i of tensor. */
      tSlice = THTensor_(new)();
      sSlice = THTensor_(new)();
      THTensor_(select)(tSlice, tensor, dim, i);
      THTensor_(select)(sSlice, src, dim, index_data[i]-1);
      THTensor_(copy)(tSlice, sSlice);
      THTensor_(free)(tSlice);
      THTensor_(free)(sSlice);
    }
    else
    {
      /* 1-D case: copy element-by-element (select cannot drop the only dim). */
      THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i]-1));
    }
  }
  /* Release the contiguous alias (the caller's index tensor is untouched). */
  THLongTensor_free(index);
}
/*
  Scatter: copy slice i of `src` (along dim) into slice index[i] of
  `tensor`.  `index` holds 1-based positions (Lua convention: the code
  subtracts 1).  NOTE(review): dim is range-checked against src, not
  against tensor -- confirm callers guarantee tensor->nDimension > dim.
*/
void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src)
{
  long i, numel;
  THTensor *tSlice, *sSlice;
  long *index_data;

  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < src->nDimension,4,"Indexing dim is out of bounds");

  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);
  for (i=0; i<numel; i++)
  {
    if (tensor->nDimension > 1 )
    {
      /* Copy slice i of src into the addressed slice of tensor. */
      tSlice = THTensor_(new)();
      sSlice = THTensor_(new)();
      THTensor_(select)(tSlice, tensor, dim, index_data[i]-1);
      THTensor_(select)(sSlice, src, dim, i);
      THTensor_(copy)(tSlice, sSlice);
      THTensor_(free)(tSlice);
      THTensor_(free)(sSlice);
    }
    else
    {
      /* 1-D case: direct element assignment. */
      THTensor_(set1d)(tensor,index_data[i]-1,THTensor_(get1d)(src,i));
    }
  }
  THLongTensor_free(index);
}
/*
  Set slice index[i] of `tensor` (along dim) to the constant `val`, for
  each of the 1-based positions in `index` (Lua convention: minus 1).
*/
void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val)
{
  long i, numel;
  THTensor *tSlice;
  long *index_data;

  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < tensor->nDimension,4,"Indexing dim is out of bounds");

  index = THLongTensor_newContiguous(index);
  index_data = THLongTensor_data(index);
  for (i=0; i<numel; i++)
  {
    if (tensor->nDimension > 1 )
    {
      /* Fill the addressed slice through a temporary view. */
      tSlice = THTensor_(new)();
      THTensor_(select)(tSlice, tensor,dim,index_data[i]-1);
      THTensor_(fill)(tSlice, val);
      THTensor_(free)(tSlice);
    }
    else
    {
      /* 1-D case: direct element assignment. */
      THTensor_(set1d)(tensor,index_data[i]-1,val);
    }
  }
  THLongTensor_free(index);
}
/*
  Returns sum(tensor .* src) as an accreal.  "The trick": inside
  TH_TENSOR_APPLY2 the macro's internal run bookkeeping (tensor_i,
  tensor_size, src_i, src_size) is advanced by hand so that each
  overlapping contiguous run is handed to BLAS dot in a single call, and
  the trailing `break` skips the macro's per-element stepping.
  NOTE(review): depends on TH_TENSOR_APPLY2 internals (THTensorApply.h).
*/
accreal THTensor_(dot)(THTensor *tensor, THTensor *src)
{
  accreal sum = 0;
  /* we use a trick here. careful with that. */
  TH_TENSOR_APPLY2(real, tensor, real, src,
                   long sz = (tensor_size-tensor_i < src_size-src_i ? tensor_size-tensor_i : src_size-src_i);
                   sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride);
                   tensor_i += sz;
                   src_i += sz;
                   tensor_data += sz*tensor_stride;
                   src_data += sz*src_stride;
                   break;);
  return sum;
}
/* Smallest element of `tensor`; requires a non-empty tensor (the message
   "must have one dimension" means at least one). */
real THTensor_(minall)(THTensor *tensor)
{
  real theMin;
  THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
  theMin = THTensor_(data)(tensor)[0];  /* seed with the first element */
  TH_TENSOR_APPLY(real, tensor, if(*tensor_data < theMin) theMin = *tensor_data;);
  return theMin;
}
/* Largest element of `tensor`; requires a non-empty tensor (the message
   "must have one dimension" means at least one). */
real THTensor_(maxall)(THTensor *tensor)
{
  real theMax;
  THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
  theMax = THTensor_(data)(tensor)[0];  /* seed with the first element */
  TH_TENSOR_APPLY(real, tensor, if(*tensor_data > theMax) theMax = *tensor_data;);
  return theMax;
}
/* Sum of all elements, accumulated in an accreal via the generic strided
   traversal.  An empty tensor yields 0. */
accreal THTensor_(sumall)(THTensor *tensor)
{
  accreal sum = 0;
  TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;);
  return sum;
}
/*
  r_ = t + value (element-wise); r_ is resized to match t.  A flat OpenMP
  loop is used when both tensors are contiguous, otherwise the generic
  strided traversal.  Fix: the element count is hoisted out of the loop --
  the original re-called THTensor_(nElement) in the loop condition on
  every iteration (and OpenMP expects an invariant bound).
*/
void THTensor_(add)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);  /* hoisted loop-invariant bound */
    long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
    {
      rp[i] = tp[i] + value;
    }
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
  }
}
/*
  r_ = t * value (element-wise); r_ is resized to match t.  Contiguous
  fast path with OpenMP, strided fallback otherwise.  Fix: the element
  count is hoisted -- the original re-called THTensor_(nElement) in the
  loop condition on every iteration.
*/
void THTensor_(mul)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);  /* hoisted loop-invariant bound */
    long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
    {
      rp[i] = tp[i] * value;
    }
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;);
  }
}
/*
  r_ = t / value (element-wise); r_ is resized to match t.  Contiguous
  fast path with OpenMP, strided fallback otherwise.  Fix: the element
  count is hoisted -- the original re-called THTensor_(nElement) in the
  loop condition on every iteration.
*/
void THTensor_(div)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);  /* hoisted loop-invariant bound */
    long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
    {
      rp[i] = tp[i] / value;
    }
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;);
  }
}
/*
  r_ = t + value * src (element-wise); r_ is resized to match t.
  Contiguous fast path with OpenMP, strided fallback otherwise.  Fix: the
  element count is hoisted -- the original re-called THTensor_(nElement)
  in the loop condition on every iteration.
*/
void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);  /* hoisted loop-invariant bound */
    long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
    {
      rp[i] = tp[i] + value * sp[i];
    }
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
  }
}
/*
  r_ = t * src (element-wise); r_ is resized to match t.  Contiguous fast
  path with OpenMP, strided fallback otherwise.  Fix: the element count is
  hoisted -- the original re-called THTensor_(nElement) in the loop
  condition on every iteration.
*/
void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);  /* hoisted loop-invariant bound */
    long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
    {
      rp[i] = tp[i] * sp[i];
    }
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;);
  }
}
/*
  r_ = t / src (element-wise); r_ is resized to match t.  Contiguous fast
  path with OpenMP, strided fallback otherwise.  Fix: the element count is
  hoisted -- the original re-called THTensor_(nElement) in the loop
  condition on every iteration.
*/
void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    real *tp = THTensor_(data)(t);
    real *sp = THTensor_(data)(src);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);  /* hoisted loop-invariant bound */
    long i;
#pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
    {
      rp[i] = tp[i] / sp[i];
    }
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;);
  }
}
/*
  r_ += value * src1 * src2 (element-wise), after r_ is initialized from t
  (resized and copied only when r_ != t; when r_ == t the accumulation is
  in place).
*/
void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }
  TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
}
/*
  r_ += value * src1 / src2 (element-wise), after r_ is initialized from t
  (resized and copied only when r_ != t; when r_ == t the accumulation is
  in place).
*/
void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2)
{
  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }
  TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
}
/*
  r_ = beta * t + alpha * (mat * vec), computed with BLAS gemv.
  mat must be 2-D, vec 1-D with vec->size[0] == mat->size[1], and t 1-D
  with t->size[0] == mat->size[0].  The transpose flag is chosen from
  whichever stride of mat equals 1 (column- vs row-contiguous); otherwise
  a contiguous copy of mat is made and freed afterwards.
  NOTE(review): the 'n' branch passes mat->stride[1] as the leading
  dimension without a lower bound -- confirm lda >= max(1,m) holds for
  degenerate single-column tensors.
*/
void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec)
{
  if( (mat->nDimension != 2) || (vec->nDimension != 1) )
    THError("matrix and vector expected");

  if( mat->size[1] != vec->size[0] )
    THError("size mismatch");

  if(t->nDimension != 1)
    THError("size mismatch");

  if(t->size[0] != mat->size[0])
    THError("size mismatch");

  /* Initialize the accumulator from t (copy only when distinct). */
  if(r_ != t)
  {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }

  if(mat->stride[0] == 1)
  {
    /* Column-contiguous: use gemv directly. */
    THBlas_(gemv)('n', mat->size[0], mat->size[1],
                  alpha, THTensor_(data)(mat), mat->stride[1],
                  THTensor_(data)(vec), vec->stride[0],
                  beta, THTensor_(data)(r_), r_->stride[0]);
  }
  else if(mat->stride[1] == 1)
  {
    /* Row-contiguous: gemv on the transpose. */
    THBlas_(gemv)('t', mat->size[1], mat->size[0],
                  alpha, THTensor_(data)(mat), mat->stride[0],
                  THTensor_(data)(vec), vec->stride[0],
                  beta, THTensor_(data)(r_), r_->stride[0]);
  }
  else
  {
    /* Neither stride is 1: work on a contiguous copy. */
    THTensor *cmat = THTensor_(newContiguous)(mat);

    THBlas_(gemv)('t', mat->size[1], mat->size[0],
                  alpha, THTensor_(data)(cmat), cmat->stride[0],
                  THTensor_(data)(vec), vec->stride[0],
                  beta, THTensor_(data)(r_), r_->stride[0]);

    THTensor_(free)(cmat);
  }
}
/*
  r_[i][j] = gain * squared Euclidean distance between row i of m1 and
  row j of m2, where m1 and m2 are flattened to (N1 x dim) and (N2 x dim)
  respectively.  The row loop over m1 is OpenMP-parallel.  The caller's
  m1/m2 are not modified: contiguous copies are taken, reshaped locally,
  and freed before returning.
*/
void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain)
{
  long N1 = m1->size[0];
  long N2 = m2->size[0];
  long dim;
  real *m1_p;
  real *m2_p;
  real *r_p;
  long i;

  THTensor_(resize2d)(r_, N1, N2);

  /* Local aliases: flatten all trailing dims of each contiguous copy. */
  m1 = THTensor_(newContiguous)(m1);
  m2 = THTensor_(newContiguous)(m2);

  THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1);
  THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2);

  dim = m1->size[1];
  THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim");

  m1_p = THTensor_(data)(m1);
  m2_p = THTensor_(data)(m2);
  r_p = THTensor_(data)(r_);

#pragma omp parallel for private(i)
  for (i=0; i<N1; i++) {
    long j,k;
    for (j=0; j<N2; j++) {
      real sum = 0;
      for (k=0; k<dim; k++) {
        real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ];
        sum += term*term;
      }
      r_p[ i*N2 + j ] = gain * sum;
    }
  }

  THTensor_(free)(m1);
  THTensor_(free)(m2);
}
/* r_ = beta*t + alpha*(m1 * m2) via BLAS gemm. t, m1 and m2 must be 2-D with
   compatible sizes. gemm wants column-major operands, so the three blocks
   below pick, for each matrix, either a transpose flag ('n'/'t') matching its
   memory layout or a contiguous working copy. */
void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *m1, THTensor *m2)
{
char transpose_r, transpose_m1, transpose_m2;
THTensor *r__, *m1_, *m2_;
if( (m1->nDimension != 2) || (m2->nDimension != 2) )
THError("matrix and matrix expected");
if(t->nDimension != 2)
THError("size mismatch");
if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) || (m1->size[1] != m2->size[0]) )
THError("size mismatch");
if(t != r_)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
/* printf("%ldx%ld = %ldx%ld X %ldx%ld\n", r_->size[0], r_->size[1], m1->size[0], m1->size[1], m2->size[0], m2->size[1]); */
/* r_ */
/* r_ is column-major: write it directly. */
if(r_->stride[0] == 1)
{
transpose_r = 'n';
r__ = r_;
}
/* r_ is row-major: compute the transposed product instead, i.e.
   (m1*m2)^T = m2^T * m1^T, by swapping the operands. */
else if(r_->stride[1] == 1)
{
THTensor *swap = m2;
m2 = m1;
m1 = swap;
transpose_r = 't';
r__ = r_;
}
else
{
/* r_ has no unit-stride dimension: build a unit-stride working copy
   (written back into r_ at the end via freeCopyTo). */
transpose_r = 'n';
r__ = THTensor_(newWithSize2d)(r_->size[1], r_->size[0]);
THTensor_(copy)(r__, r_);
THTensor_(transpose)(r__, NULL, 0, 1);
}
/* m1: pick 'n'/'t' to match its layout (relative to r_'s orientation),
   or densify when neither dimension has unit stride. */
if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1)
{
transpose_m1 = 'n';
m1_ = m1;
}
else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1)
{
transpose_m1 = 't';
m1_ = m1;
}
else
{
transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
m1_ = THTensor_(newContiguous)(m1);
}
/* m2: same treatment as m1. */
if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1)
{
transpose_m2 = 'n';
m2_ = m2;
}
else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1)
{
transpose_m2 = 't';
m2_ = m2;
}
else
{
transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
m2_ = THTensor_(newContiguous)(m2);
}
/* do the operation */
THBlas_(gemm)(transpose_m1,
transpose_m2,
r__->size[(transpose_r == 'n' ? 0 : 1)],
r__->size[(transpose_r == 'n' ? 1 : 0)],
m1_->size[(transpose_r == 'n' ? 1 : 0)],
alpha,
THTensor_(data)(m1_),
(transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
THTensor_(data)(m2_),
(transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
beta,
THTensor_(data)(r__),
r__->stride[(transpose_r == 'n' ? 1 : 0)]);
/* free intermediate variables */
if(m1_ != m1)
THTensor_(free)(m1_);
if(m2_ != m2)
THTensor_(free)(m2_);
if(r__ != r_)
THTensor_(freeCopyTo)(r__, r_);
}
/* r_ = beta*t + alpha*(vec1 (outer) vec2), via BLAS ger.
   vec1 and vec2 are 1-D; t is 2-D with t->size == (vec1 size, vec2 size).
   ger has no beta parameter, so beta scaling is applied upfront with mul. */
void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2)
{
if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
THError("vector and vector expected");
if(t->nDimension != 2)
THError("size mismatch");
if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) )
THError("size mismatch");
if(r_ != t)
{
THTensor_(resizeAs)(r_, t);
THTensor_(copy)(r_, t);
}
if(beta != 1)
THTensor_(mul)(r_, r_, beta);
/* Column-major r_: update directly. */
if(r_->stride[0] == 1)
{
THBlas_(ger)(vec1->size[0], vec2->size[0],
alpha, THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(r_), r_->stride[1]);
}
/* Row-major r_: compute the transposed update (vec2 outer vec1). */
else if(r_->stride[1] == 1)
{
THBlas_(ger)(vec2->size[0], vec1->size[0],
alpha, THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(r_), r_->stride[0]);
}
else
{
/* No unit-stride dimension: update a contiguous clone, then copy back. */
THTensor *cr = THTensor_(newClone)(r_);
THBlas_(ger)(vec2->size[0], vec1->size[0],
alpha, THTensor_(data)(vec2), vec2->stride[0],
THTensor_(data)(vec1), vec1->stride[0],
THTensor_(data)(cr), cr->stride[0]);
THTensor_(freeCopyTo)(cr, r_);
}
}
/* Total number of elements in t; thin alias for nElement. */
long THTensor_(numel)(THTensor *t)
{
long nelem = THTensor_(nElement)(t);
return nelem;
}
/* values_/indices_ <- per-slice maximum of t along `dimension` and its
   0-based position. Both outputs keep t's shape with `dimension`
   collapsed to 1. Ties keep the first (lowest-index) occurrence. */
void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
THLongStorage *dim;
long i;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
/* Linear scan of each 1-D slice, starting from element 0. */
TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
long theIndex = 0;
real theMax = t_data[0];
for(i = 1; i < t_size; i++)
{
if(t_data[i*t_stride] > theMax)
{
theIndex = i;
theMax = t_data[i*t_stride];
}
}
*indices__data = theIndex;
*values__data = theMax;);
}
/* values_/indices_ <- per-slice minimum of t along `dimension` and its
   0-based position; mirror image of THTensor_(max) above. */
void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension)
{
THLongStorage *dim;
long i;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(values_, dim, NULL);
THLongTensor_resize(indices_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
long theIndex = 0;
real theMin = t_data[0];
for(i = 1; i < t_size; i++)
{
if(t_data[i*t_stride] < theMin)
{
theIndex = i;
theMin = t_data[i*t_stride];
}
}
*indices__data = theIndex;
*values__data = theMin;);
}
/* r_ <- sum of t along `dimension`; r_ keeps t's shape with that dimension
   collapsed to 1. */
void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
/* Accumulate each 1-D slice in accreal to limit rounding error. */
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride];
*r__data = (real)sum;);
}
/* r_ <- product of t along `dimension` (same shape convention as sum). */
void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal prod = 1;
long i;
for(i = 0; i < t_size; i++)
prod *= t_data[i*t_stride];
*r__data = (real)prod;);
}
/* r_ <- inclusive running sum of t along `dimension` (same shape as t). */
void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");
THTensor_(resizeAs)(r_, t);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal cumsum = 0;
long i;
for(i = 0; i < t_size; i++)
{
cumsum += t_data[i*t_stride];
r__data[i*r__stride] = (real)cumsum;
});
}
/* r_ <- inclusive running product of t along `dimension` (same shape as t). */
void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension out of range");
THTensor_(resizeAs)(r_, t);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal cumprod = 1;
long i;
for(i = 0; i < t_size; i++)
{
cumprod *= t_data[i*t_stride];
r__data[i*r__stride] = (real)cumprod;
});
}
/* r_ <- elementwise sign of t: +1, -1, or 0.
   For byte (unsigned) tensors negative values cannot occur, so only the
   >0 -> 1 / else -> 0 cases are generated. */
void THTensor_(sign)(THTensor *r_, THTensor *t)
{
THTensor_(resizeAs)(r_, t);
#if defined (TH_REAL_IS_BYTE)
TH_TENSOR_APPLY2(real, r_, real, t,
if (*t_data > 0) *r__data = 1;
else *r__data = 0;);
#else
TH_TENSOR_APPLY2(real, r_, real, t,
if (*t_data > 0) *r__data = 1;
else if (*t_data < 0) *r__data = -1;
else *r__data = 0;);
#endif
}
/* Sum of the main-diagonal entries of the 2-D tensor t. */
accreal THTensor_(trace)(THTensor *t)
{
real *p = THTensor_(data)(t);
accreal acc = 0;
long k;
long stride0, stride1, ndiag;
THArgCheck(THTensor_(nDimension)(t) == 2, 1, "not a matrix");
stride0 = THTensor_(stride)(t, 0);
stride1 = THTensor_(stride)(t, 1);
/* The diagonal has min(rows, cols) entries; entry k sits at byte-offset
   k*(stride0+stride1) elements from the start of the data. */
ndiag = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1));
for(k = 0; k < ndiag; k++)
acc += p[k*(stride0+stride1)];
return acc;
}
/* r_ <- 3-D cross product of a and b along `dimension` (an axis of size 3).
   If dimension < 0, the first axis of size 3 is used. a and b must have
   identical shapes. */
void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension)
{
int i;
if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b))
THError("inconsistent tensor sizes"); /* fixed typo: was "inconsitent" */
for(i = 0; i < THTensor_(nDimension)(a); i++)
{
if(THTensor_(size)(a, i) != THTensor_(size)(b, i))
THError("inconsistent tensor sizes");
}
/* dimension < 0 means "auto-detect": pick the first size-3 axis. */
if(dimension < 0)
{
for(i = 0; i < THTensor_(nDimension)(a); i++)
{
if(THTensor_(size)(a, i) == 3)
{
dimension = i;
break;
}
}
if(dimension < 0)
THError("no dimension of size 3");
}
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension out of range");
THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension size is not 3");
THTensor_(resizeAs)(r_, a);
/* Standard cross product applied to each length-3 slice along `dimension`. */
TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension,
r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride];
r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride];
r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];);
}
/* r_ <- tensor of the given size filled with zeros. */
void THTensor_(zeros)(THTensor *r_, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(zero)(r_);
}
/* r_ <- tensor of the given size filled with ones. */
void THTensor_(ones)(THTensor *r_, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(fill)(r_, 1);
}
/* Diagonal embed/extract, like LAPACK-style diag with offset k:
   - t is 1-D: r_ becomes a square matrix with t on the k-th diagonal
     (k > 0 above the main diagonal, k < 0 below).
   - t is 2-D: r_ becomes a 1-D tensor holding t's k-th diagonal. */
void THTensor_(diag)(THTensor *r_, THTensor *t, int k)
{
THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected");
if(THTensor_(nDimension)(t) == 1)
{
real *t_data = THTensor_(data)(t);
long t_stride_0 = THTensor_(stride)(t, 0);
long t_size = THTensor_(size)(t, 0);
/* The matrix must be large enough to hold the shifted diagonal. */
long sz = t_size + (k >= 0 ? k : -k);
real *r__data;
long r__stride_0;
long r__stride_1;
long i;
THTensor_(resize2d)(r_, sz, sz);
THTensor_(zero)(r_);
r__data = THTensor_(data)(r_);
r__stride_0 = THTensor_(stride)(r_, 0);
r__stride_1 = THTensor_(stride)(r_, 1);
/* Shift the write origin to (0,k) for k>=0, or (-k,0) for k<0. */
r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0);
for(i = 0; i < t_size; i++)
r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0];
}
else
{
real *t_data = THTensor_(data)(t);
long t_stride_0 = THTensor_(stride)(t, 0);
long t_stride_1 = THTensor_(stride)(t, 1);
long sz;
real *r__data;
long r__stride_0;
long i;
/* Length of the k-th diagonal of a (size0 x size1) matrix. */
if(k >= 0)
sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k);
else
sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1));
THTensor_(resize1d)(r_, sz);
r__data = THTensor_(data)(r_);
r__stride_0 = THTensor_(stride)(r_, 0);
/* Shift the read origin to the start of the k-th diagonal. */
t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0);
for(i = 0; i < sz; i++)
r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)];
}
}
/* r_ <- n-by-m identity matrix (ones on the main diagonal, zeros elsewhere).
   m <= 0 is treated as m = n (square matrix). */
void THTensor_(eye)(THTensor *r_, long n, long m)
{
real *r__data;
long i, sz;
THArgCheck(n > 0, 1, "invalid argument");
if(m <= 0)
m = n;
THTensor_(resize2d)(r_, n, m);
THTensor_(zero)(r_);
r__data = THTensor_(data)(r_);
/* Walk the main diagonal: entry i lives at offset i*(stride0+stride1).
   (Removed a dead `i = 0;` store that the loop initializer overwrote.) */
sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1));
for(i = 0; i < sz; i++)
r__data[i*(r_->stride[0]+r_->stride[1])] = 1;
}
/* r_ <- arithmetic sequence xmin, xmin+step, ..., up to and including xmax
   (when (xmax-xmin) is an exact multiple of step). step may be negative for
   a descending range but must be non-zero. */
void THTensor_(range)(THTensor *r_, real xmin, real xmax, real step)
{
long size;
real i = 0;
THArgCheck(step > 0 || step < 0, 3, "step must be a non-null number");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound incoherent with step sign");
/* Number of points; the division truncates toward zero. */
size = (long)((xmax-xmin)/step+1);
THTensor_(resize1d)(r_, size);
/* i counts elements in traversal order inside the APPLY macro. */
TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;);
}
/* r_ <- a random permutation of 0 .. n-1 (stored as reals).
   Fills with the identity, then applies a Fisher-Yates shuffle.
   NOTE(review): THRandom_random() % (n-i) has slight modulo bias for large
   n — acceptable here, but not cryptographic-quality. */
void THTensor_(randperm)(THTensor *r_, long n)
{
real *r__data;
long r__stride_0;
long i;
THArgCheck(n > 0, 1, "must be strictly positive");
THTensor_(resize1d)(r_, n);
r__data = THTensor_(data)(r_);
r__stride_0 = THTensor_(stride)(r_,0);
for(i = 0; i < n; i++)
r__data[i*r__stride_0] = (real)(i);
/* Fisher-Yates: swap slot i with a uniformly chosen slot in [i, n). */
for(i = 0; i < n-1; i++)
{
long z = THRandom_random() % (n-i);
real sav = r__data[i*r__stride_0];
r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0];
r__data[(z+i)*r__stride_0] = sav;
}
}
/* r_ <- t's elements laid out with the given size.
   NOTE(review): no check that `size` holds the same element count as t —
   presumably THTensor_(copy) enforces/handles that; confirm. */
void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(copy)(r_, t);
}
/* I cut and pasted (slightly adapted) the quicksort code from
http://www.alienryderflex.com/quicksort/
This public-domain C implementation by Darel Rex Finley.
Thanks man :)
Updated Oct 16 2013: change choice of pivot to avoid worst-case being a pre-sorted input - Daniel and Julien
Updated Oct 24 2013: change pivot comparison to strict inequality to avoid worst-case on constant input, see Sedgewick, Algorithms in C, Addison Wesley, 1990, p. 120 - Julien
*/
#define MAX_LEVELS 300
/* In-place iterative quicksort (ascending) of `elements` entries spaced by
   `stride`, permuting the companion index array idx identically.
   Adapted from Darel Rex Finley's public-domain quicksort (see note above).
   Recursion is replaced by an explicit stack of [beg,end) ranges; after each
   partition the larger half is swapped deeper into the stack so the depth
   stays O(log n), well under MAX_LEVELS. */
static void THTensor_(quicksortascend)(real *arr, long *idx, long elements, long stride)
{
long beg[MAX_LEVELS], end[MAX_LEVELS], i=0, L, R, P, swap, pid;
real rswap, piv;
beg[0]=0; end[0]=elements;
while (i>=0) {
L=beg[i]; R=end[i]-1;
if (L<R) {
P=(L+R)>>1; /* Choose pivot as middle element of the current block */
piv=arr[P*stride];
pid=idx[P*stride];
/* Move the pivot to the left edge, then do the two-way partition walk. */
rswap=arr[L*stride];
swap=idx[L*stride];
arr[L*stride]=piv;
idx[L*stride]=pid;
arr[P*stride]=rswap;
idx[P*stride]=swap;
while (L<R) {
while (arr[R*stride]>piv && L<R)
R--;
if (L<R) {
idx[L*stride]=idx[R*stride];
arr[L*stride]=arr[R*stride];
L++;
}
while (arr[L*stride]<piv && L<R)
L++;
if (L<R) {
idx[R*stride]=idx[L*stride];
arr[R*stride]=arr[L*stride];
R--;
}
}
/* Drop the pivot into its final slot and push both sub-ranges. */
idx[L*stride]=pid;
arr[L*stride]=piv;
beg[i+1]=L+1;
end[i+1]=end[i];
end[i++]=L;
/* Keep the smaller sub-range on top so stack depth stays logarithmic. */
if (end[i]-beg[i]>end[i-1]-beg[i-1]) {
swap=beg[i]; beg[i]=beg[i-1]; beg[i-1]=swap;
swap=end[i]; end[i]=end[i-1]; end[i-1]=swap;
}
}
else {
i--;
}
}
}
/* Descending-order twin of quicksortascend above: identical algorithm with
   the two partition comparisons flipped (< and > swapped). */
static void THTensor_(quicksortdescend)(real *arr, long *idx, long elements, long stride)
{
long beg[MAX_LEVELS], end[MAX_LEVELS], i=0, L, R, P, swap, pid;
real rswap, piv;
beg[0]=0; end[0]=elements;
while (i>=0) {
L=beg[i]; R=end[i]-1;
if (L<R) {
P=(L+R)>>1; /* Choose pivot as middle element of the current block */
piv=arr[P*stride];
pid=idx[P*stride];
rswap=arr[L*stride];
swap=idx[L*stride];
arr[L*stride]=piv;
idx[L*stride]=pid;
arr[P*stride]=rswap;
idx[P*stride]=swap;
while (L<R) {
while (arr[R*stride]<piv && L<R)
R--;
if (L<R) {
idx[L*stride]=idx[R*stride];
arr[L*stride]=arr[R*stride];
L++;
}
while (arr[L*stride]>piv && L<R)
L++;
if (L<R) {
idx[R*stride]=idx[L*stride];
arr[R*stride]=arr[L*stride];
R--;
}
}
idx[L*stride]=pid;
arr[L*stride]=piv;
beg[i+1]=L+1;
end[i+1]=end[i];
end[i++]=L;
/* Keep the smaller sub-range on top so stack depth stays logarithmic. */
if (end[i]-beg[i]>end[i-1]-beg[i-1]) {
swap=beg[i]; beg[i]=beg[i-1]; beg[i-1]=swap;
swap=end[i]; end[i]=end[i-1]; end[i-1]=swap;
}
}
else {
i--;
}
}
}
/* rt_ <- t sorted along `dimension` (ascending unless descendingOrder);
   ri_ <- the 0-based source positions of the sorted values within each
   slice. Sorts a copy; t itself is untouched. */
void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension");
THTensor_(resizeAs)(rt_, t);
THTensor_(copy)(rt_, t);
{
THLongStorage *size = THTensor_(newSizeOf)(t);
THLongTensor_resize(ri_, size, NULL);
THLongStorage_free(size);
}
/* Seed each index slice with the identity, then sort values and indices
   together with the appropriate quicksort variant. */
if(descendingOrder)
{
TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
long i;
for(i = 0; i < ri__size; i++)
ri__data[i*ri__stride] = i;
THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);)
}
else
{
TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
long i;
for(i = 0; i < ri__size; i++)
ri__data[i*ri__stride] = i;
THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);)
}
}
/* r_ <- lower-triangular part of the 2-D tensor t: keep entries with
   column <= row + k, zero the rest. k shifts the boundary diagonal up
   (k > 0) or down (k < 0). */
void THTensor_(tril)(THTensor *r_, THTensor *t, long k)
{
long t_size_0, t_size_1;
long t_stride_0, t_stride_1;
long r__stride_0, r__stride_1;
real *t_data, *r__data;
long r, c;
THArgCheck(THTensor_(nDimension)(t) == 2, 1, "not a matrix");
THTensor_(resizeAs)(r_, t);
t_size_0 = THTensor_(size)(t, 0);
t_size_1 = THTensor_(size)(t, 1);
t_stride_0 = THTensor_(stride)(t, 0);
t_stride_1 = THTensor_(stride)(t, 1);
r__stride_0 = THTensor_(stride)(r_, 0);
r__stride_1 = THTensor_(stride)(r_, 1);
r__data = THTensor_(data)(r_);
t_data = THTensor_(data)(t);
for(r = 0; r < t_size_0; r++)
{
/* Columns [0, sz) are kept; the zero loop starts one column early
   (at r+k) but the copy loop overwrites that overlap afterwards. */
long sz = THMin(r+k+1, t_size_1);
for(c = THMax(0, r+k); c < t_size_1; c++)
r__data[r*r__stride_0+c*r__stride_1] = 0;
for(c = 0; c < sz; c++)
r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
}
}
/* r_ <- upper-triangular part of the 2-D tensor t: keep entries with
   column >= row + k, zero the rest. Mirror image of tril above. */
void THTensor_(triu)(THTensor *r_, THTensor *t, long k)
{
long t_size_0, t_size_1;
long t_stride_0, t_stride_1;
long r__stride_0, r__stride_1;
real *t_data, *r__data;
long r, c;
THArgCheck(THTensor_(nDimension)(t) == 2, 1, "not a matrix");
THTensor_(resizeAs)(r_, t);
t_size_0 = THTensor_(size)(t, 0);
t_size_1 = THTensor_(size)(t, 1);
t_stride_0 = THTensor_(stride)(t, 0);
t_stride_1 = THTensor_(stride)(t, 1);
r__stride_0 = THTensor_(stride)(r_, 0);
r__stride_1 = THTensor_(stride)(r_, 1);
r__data = THTensor_(data)(r_);
t_data = THTensor_(data)(t);
for(r = 0; r < t_size_0; r++)
{
/* Columns [0, sz) are zeroed; columns from max(0, r+k) are copied. */
long sz = THMin(r+k, t_size_1);
for(c = THMax(0, r+k); c < t_size_1; c++)
r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
for(c = 0; c < sz; c++)
r__data[r*r__stride_0+c*r__stride_1] = 0;
}
}
/* r_ <- ta and tb concatenated along `dimension`. Missing trailing
   dimensions are treated as size 1; all other dimensions must agree. */
void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension)
{
THLongStorage *size;
int i;
/* Result rank: enough to hold both inputs and the cat dimension. */
int ndim = THMax(ta->nDimension, tb->nDimension);
ndim = THMax(ndim, dimension+1);
THArgCheck(dimension >= 0, 4, "invalid dimension");
size = THLongStorage_newWithSize(ndim);
for(i = 0; i < ndim; i++)
{
int tadi = (i < ta->nDimension ? ta->size[i] : 1);
int tbdi = (i < tb->nDimension ? tb->size[i] : 1);
if(i == dimension)
size->data[i] = tadi+tbdi;
else
{
if(tadi != tbdi)
{
THLongStorage_free(size);
THError("inconsistent tensor sizes");
}
size->data[i] = tadi;
}
}
THTensor_(resize)(r_, size, NULL);
THLongStorage_free(size);
/* Copy ta into the leading slice of r_ along `dimension`. */
{
THTensor *nta = THTensor_(newWithTensor)(r_);
THTensor_(narrow)(nta, NULL, dimension, 0, (dimension < ta->nDimension ? ta->size[dimension] : 1));
THTensor_(copy)(nta, ta);
THTensor_(free)(nta);
}
/* Copy tb into the remaining slice, offset by ta's extent. */
{
THTensor *ntb = THTensor_(newWithTensor)(r_);
THTensor_(narrow)(ntb, NULL, dimension, (dimension < ta->nDimension ? ta->size[dimension] : 1), (dimension < tb->nDimension ? tb->size[dimension] : 1));
THTensor_(copy)(ntb, tb);
THTensor_(free)(ntb);
}
}
/* Generates the elementwise comparison family NAME{Value,ValueT,Tensor,TensorT}:
   scalar/tensor comparands, with 0/1 results written into a ByteTensor or
   (for the ...T variants) a tensor of the same real type.
   Fix: removed the dangling `\` after the final `}`, which line-spliced the
   first instantiation line below into the macro body and broke expansion. */
#define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \
void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \
{ \
THByteTensor_rawResize(r_, t->nDimension, t->size, NULL); \
THByteTensor_zero(r_); \
TH_TENSOR_APPLY2(unsigned char, r_, real, t, \
if (*t_data OP value) *r__data = 1;); \
} \
void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \
{ \
THTensor_(rawResize)(r_, t->nDimension, t->size, NULL); \
THTensor_(zero)(r_); \
TH_TENSOR_APPLY2(real, r_, real, t, \
if (*t_data OP value) *r__data = 1;); \
} \
void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \
{ \
THByteTensor_rawResize(r_, ta->nDimension, ta->size, NULL); \
THByteTensor_zero(r_); \
TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \
if(*ta_data OP *tb_data) *r__data = 1;); \
} \
void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \
{ \
THTensor_(rawResize)(r_, ta->nDimension, ta->size, NULL); \
THTensor_(zero)(r_); \
TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \
if(*ta_data OP *tb_data) *r__data = 1;); \
}

TENSOR_IMPLEMENT_LOGICAL(lt,<)
TENSOR_IMPLEMENT_LOGICAL(gt,>)
TENSOR_IMPLEMENT_LOGICAL(le,<=)
TENSOR_IMPLEMENT_LOGICAL(ge,>=)
TENSOR_IMPLEMENT_LOGICAL(eq,==)
TENSOR_IMPLEMENT_LOGICAL(ne,!=)
/* floating point only now */
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t) \
{ \
THTensor_(resizeAs)(r_, t); \
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
} \
#define LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(NAME, CFUNC) \
void THTensor_(NAME)(THTensor *r_, THTensor *t, real value) \
{ \
THTensor_(resizeAs)(r_, t); \
TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data, value);); \
} \
\
LAB_IMPLEMENT_BASIC_FUNCTION(log,log)
LAB_IMPLEMENT_BASIC_FUNCTION(log1p,log1p)
LAB_IMPLEMENT_BASIC_FUNCTION(exp,exp)
LAB_IMPLEMENT_BASIC_FUNCTION(cos,cos)
LAB_IMPLEMENT_BASIC_FUNCTION(acos,acos)
LAB_IMPLEMENT_BASIC_FUNCTION(cosh,cosh)
LAB_IMPLEMENT_BASIC_FUNCTION(sin,sin)
LAB_IMPLEMENT_BASIC_FUNCTION(asin,asin)
LAB_IMPLEMENT_BASIC_FUNCTION(sinh,sinh)
LAB_IMPLEMENT_BASIC_FUNCTION(tan,tan)
LAB_IMPLEMENT_BASIC_FUNCTION(atan,atan)
LAB_IMPLEMENT_BASIC_FUNCTION(tanh,tanh)
LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(pow,pow)
LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,sqrt)
LAB_IMPLEMENT_BASIC_FUNCTION(ceil,ceil)
LAB_IMPLEMENT_BASIC_FUNCTION(floor,floor)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,fabs)
/* r_ <- elementwise atan2(tx, ty), i.e. the angle of the point (ty, tx). */
void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty)
{
THTensor_(resizeAs)(r_, tx);
TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = atan2(*tx_data,*ty_data););
}
/* r_ <- mean of t along `dimension`; r_ keeps t's shape with that
   dimension collapsed to 1. */
void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
/* Sum each slice in accreal, then divide once at the end. */
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride];
*r__data = (real)sum/t_size;);
}
/* r_ <- standard deviation of t along `dimension`, using the one-pass
   sum / sum-of-squares formula.
   flag != 0: biased estimator (normalize by n).
   flag == 0: unbiased estimator (normalize by n-1) — assumes each slice has
   at least 2 elements; TODO confirm callers guarantee this. */
void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int flag)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
accreal sum2 = 0;
long i;
for(i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
sum += z;
sum2 += z*z;
}
if(flag)
{
sum /= t_size;
sum2 /= t_size;
sum2 -= sum*sum;
/* Clamp tiny negative values caused by rounding before sqrt. */
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sqrt(sum2);
}
else
{
sum /= t_size;
sum2 /= t_size-1;
sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sqrt(sum2);
});
}
/* r_ <- variance of t along `dimension`; identical to THTensor_(std) above
   except no final sqrt.
   flag != 0: biased (divide by n); flag == 0: unbiased (divide by n-1,
   assumes slice length >= 2 — TODO confirm callers guarantee this). */
void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int flag)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
accreal sum2 = 0;
long i;
for(i = 0; i < t_size; i++)
{
real z = t_data[i*t_stride];
sum += z;
sum2 += z*z;
}
if(flag)
{
sum /= t_size;
sum2 /= t_size;
sum2 -= sum*sum;
/* Clamp tiny negative values caused by rounding. */
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = sum2;
}
else
{
sum /= t_size;
sum2 /= t_size-1;
sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
sum2 = (sum2 < 0 ? 0 : sum2);
*r__data = (real)sum2;
});
}
/* r_ <- p-norm of t along `dimension` with p = value:
   (sum |x|^p)^(1/p). value == 0 is the "0-norm": a count of nonzero
   entries in each slice. */
void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension)
{
THLongStorage *dim;
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension");
dim = THTensor_(newSizeOf)(t);
THLongStorage_set(dim, dimension, 1);
THTensor_(resize)(r_, dim, NULL);
THLongStorage_free(dim);
if(value == 0) {
/* 0-"norm": count nonzeros (the comparison yields 0 or 1). */
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += t_data[i*t_stride] != 0.0;
*r__data = sum;)
} else {
TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
accreal sum = 0;
long i;
for(i = 0; i < t_size; i++)
sum += pow(fabs(t_data[i*t_stride]), value);
*r__data = pow(sum, 1.0/value);)
}
}
/* Whole-tensor p-norm with p = value: (sum |x|^p)^(1/p).
   value == 0 returns the number of nonzero elements instead. */
accreal THTensor_(normall)(THTensor *tensor, real value)
{
accreal sum = 0;
if(value == 0) {
TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;);
return sum;
} else {
TH_TENSOR_APPLY(real, tensor, sum += pow(fabs(*tensor_data), value););
return pow(sum, 1.0/value);
}
}
/* p-norm distance between tensor and src: (sum |a-b|^value)^(1/value).
   Fix: accumulate in accreal rather than real — the old `real sum`
   truncated every partial sum to single precision for float tensors, and
   was inconsistent with normall/varall and with the accreal return type. */
accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value)
{
accreal sum = 0;
TH_TENSOR_APPLY2(real, tensor, real, src,
sum += pow(fabs(*tensor_data - *src_data), value);)
return pow(sum, 1.0/value);
}
/* Mean over all elements; the tensor must be non-empty. */
accreal THTensor_(meanall)(THTensor *tensor)
{
THArgCheck(tensor->nDimension > 0, 1, "empty Tensor");
return THTensor_(sumall)(tensor)/THTensor_(nElement)(tensor);
}
/* Unbiased variance over all elements (divides by n-1).
   NOTE(review): a 1-element tensor divides by zero here — confirm callers
   never pass one. */
accreal THTensor_(varall)(THTensor *tensor)
{
accreal mean = THTensor_(meanall)(tensor);
accreal sum = 0;
TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean););
sum /= (THTensor_(nElement)(tensor)-1);
return sum;
}
/* Unbiased standard deviation over all elements: sqrt(varall). */
accreal THTensor_(stdall)(THTensor *tensor)
{
return sqrt(THTensor_(varall)(tensor));
}
/* r_ <- n points evenly spaced from a to b inclusive.
   n == 1 is only allowed when a == b (a single point). Requires a <= b. */
void THTensor_(linspace)(THTensor *r_, real a, real b, long n)
{
/* i counts elements in traversal order inside the APPLY macro. */
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
THArgCheck(a <= b, 2, "end range should be greater than start range");
THTensor_(resize1d)(r_, n);
if(n == 1) {
TH_TENSOR_APPLY(real, r_,
*r__data = a;
i++;
);
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = a + i*(b-a)/((real)(n-1));
i++;
);
}
}
/* r_ <- n points spaced evenly on a log10 scale from 10^a to 10^b
   inclusive. Same argument constraints as linspace. */
void THTensor_(logspace)(THTensor *r_, real a, real b, long n)
{
real i = 0;
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
THArgCheck(a <= b, 2, "end range should be greater than start range");
THTensor_(resize1d)(r_, n);
if(n == 1) {
TH_TENSOR_APPLY(real, r_,
*r__data = pow(10.0, a);
i++;
);
} else {
TH_TENSOR_APPLY(real, r_,
*r__data = pow(10.0, a + i*(b-a)/((real)(n-1)));
i++;
);
}
}
/* r_ <- tensor of the given size filled with uniform samples on [0, 1). */
void THTensor_(rand)(THTensor *r_, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(uniform)(r_, 0, 1);
}
/* r_ <- tensor of the given size filled with standard normal samples. */
void THTensor_(randn)(THTensor *r_, THLongStorage *size)
{
THTensor_(resize)(r_, size, NULL);
THTensor_(normal)(r_, 0, 1);
}
/* hist <- histogram of tensor with nbins bins over [minvalue, maxvalue].
   If minvalue == maxvalue the actual data min/max are used instead (and if
   the data are constant, the range is widened by +/-1 so binning works).
   Values outside the range are silently dropped by the final guard. */
void THTensor_(histc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue)
{
THTensor *clone;
real minval;
real maxval;
real bins;
real *h_data;
THTensor_(resize1d)(hist, nbins);
THTensor_(zero)(hist);
minval = minvalue;
maxval = maxvalue;
if (minval == maxval)
{
minval = THTensor_(minall)(tensor);
maxval = THTensor_(maxall)(tensor);
}
if (minval == maxval)
{
minval = minval - 1;
maxval = maxval + 1;
}
/* Shave a hair off nbins so maxval itself maps into the last bin. */
bins = (real)(nbins)-1e-6;
/* Map each value to a 1-based bin id: floor((x-min)/(max-min)*bins) + 1. */
clone = THTensor_(newWithSize1d)(THTensor_(nElement)(tensor));
THTensor_(copy)(clone,tensor);
THTensor_(add)(clone, clone, -minval);
THTensor_(div)(clone, clone, (maxval-minval));
THTensor_(mul)(clone, clone, bins);
THTensor_(floor)(clone, clone);
THTensor_(add)(clone, clone, 1);
h_data = THTensor_(data)(hist);
TH_TENSOR_APPLY(real, clone, \
if ((*clone_data <= nbins) && (*clone_data >= 1)) { \
*(h_data + (int)(*clone_data) - 1) += 1; \
});
THTensor_(free)(clone);
}
#endif /* floating point only part */
#endif
|
atomic-10.c | /* { dg-do run } */
extern void abort (void);
int x1, x2, x3, x4, x5;
volatile int y6 = 9, y2, y3, y4, y5;
volatile unsigned char z1, z2, z3, z4, z5;
float a1, a2, a3, a4;
/* Exercises "#pragma omp atomic" on plain ints: increments/decrements and
   every compound-assignment form the construct supports (+= -= |= &= ^=
   *= /= <<= >>=). Expected end state is checked in main. */
void
f1 (void)
{
#pragma omp atomic
x1++;
#pragma omp atomic
x2--;
#pragma omp atomic
++x3;
#pragma omp atomic
--x4;
#pragma omp atomic
x5 += 1;
#pragma omp atomic
x1 -= y6;
#pragma omp atomic
x2 |= 1;
#pragma omp atomic
x3 &= 1;
#pragma omp atomic
x4 ^= 1;
#pragma omp atomic
x5 *= 3;
#pragma omp atomic
x1 /= 3;
#pragma omp atomic
x2 /= 3;
#pragma omp atomic
x3 <<= 3;
#pragma omp atomic
x4 >>= 3;
}
/* Same operator coverage as f1, but on volatile ints (y*), to check that
   atomic updates are generated correctly for volatile operands. */
void
f2 (void)
{
#pragma omp atomic
y6++;
#pragma omp atomic
y2--;
#pragma omp atomic
++y3;
#pragma omp atomic
--y4;
#pragma omp atomic
y5 += 1;
#pragma omp atomic
y6 -= x1;
#pragma omp atomic
y2 |= 1;
#pragma omp atomic
y3 &= 1;
#pragma omp atomic
y4 ^= 1;
#pragma omp atomic
y5 *= 3;
#pragma omp atomic
y6 /= 3;
#pragma omp atomic
y2 /= 3;
#pragma omp atomic
y3 <<= 3;
#pragma omp atomic
y4 >>= 3;
}
/* Atomic updates on volatile unsigned chars (z*), exercising sub-word
   operands including wrap-around behavior (checked in main). */
void
f3 (void)
{
#pragma omp atomic
z1++;
#pragma omp atomic
z2--;
#pragma omp atomic
++z3;
#pragma omp atomic
--z4;
#pragma omp atomic
z5 += 1;
#pragma omp atomic
z1 |= 1;
#pragma omp atomic
z2 &= 1;
#pragma omp atomic
z3 ^= 1;
#pragma omp atomic
z4 *= 3;
#pragma omp atomic
z5 /= 3;
#pragma omp atomic
z1 /= 3;
#pragma omp atomic
z2 <<= 3;
#pragma omp atomic
z3 >>= 3;
}
/* Atomic updates on floats (a*): the arithmetic forms OpenMP atomic
   permits for floating-point operands. */
void
f4 (void)
{
#pragma omp atomic
a1 += 8.0;
#pragma omp atomic
a2 *= 3.5;
#pragma omp atomic
a3 -= a1 + a2;
#pragma omp atomic
a4 /= 2.0;
}
/* Runs each group in sequence and checks the final values implied by the
   initial globals (x* start at 0, y6 starts at 9, a* are set just before
   f4). Any mismatch aborts, failing the test. */
int
main (void)
{
f1 ();
if (x1 != -2 || x2 != 0 || x3 != 8 || x4 != -1 || x5 != 3)
abort ();
f2 ();
if (y6 != 4 || y2 != 0 || y3 != 8 || y4 != -1 || y5 != 3)
abort ();
f3 ();
if (z1 != 0 || z2 != 8 || z3 != 0 || z4 != 253 || z5 != 0)
abort ();
a1 = 7;
a2 = 10;
a3 = 11;
a4 = 13;
f4 ();
if (a1 != 15.0 || a2 != 35.0 || a3 != -39.0 || a4 != 6.5)
abort ();
return 0;
}
|
GB_binop__lt_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__lt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__lt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_uint8)
// A*D function (colscale): GB (_AxD__lt_uint8)
// D*A function (rowscale): GB (_DxB__lt_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_uint8)
// C=scalar+B GB (_bind1st__lt_uint8)
// C=scalar+B' GB (_bind1st_tran__lt_uint8)
// C=A+scalar GB (_bind2nd__lt_uint8)
// C=A'+scalar GB (_bind2nd_tran__lt_uint8)
// C type: bool
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (fix: removed a dangling `\` after the 0 that line-spliced the next
// comment line into this macro's replacement list)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// (fix: same dangling `\` removed here)
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_UINT8 || GxB_NO_LT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense. The loop body comes from the
// included template, specialized by the GB_* macros above for LT_UINT8.
void GB (_Cdense_ewise3_noaccum__lt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix. For the LT
// operator the template body is compiled out (#if 0), so this kernel is a
// stub: it returns GrB_NO_VALUE when the operator is disabled at compile
// time, and GrB_SUCCESS (doing no work) otherwise.
GrB_Info GB (_Cdense_accumB__lt_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. As with accumB above,
// the template body is compiled out (#if 0) for this operator, leaving a
// stub that reports GrB_NO_VALUE when disabled and GrB_SUCCESS otherwise.
GrB_Info GB (_Cdense_accumb__lt_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D where D is diagonal (column scaling of A).
GrB_Info GB (_AxD__lt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the LT comparator produces a bool result, so C->x is bool
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B where D is diagonal (row scaling of B).
GrB_Info GB (_DxB__lt_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the LT comparator produces a bool result, so C->x is bool
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C=A+B, C<M>=A+B, or C<!M>=A+B.  For eWiseUnion,
// alpha/beta scalars substitute for entries missing from A or B.
GrB_Info GB (_AaddB__lt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspace, freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
// the scalars are only read in the eWiseUnion case
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C is sparse/hypersparse, with optional mask M.
GrB_Info GB (_AemultB_08__lt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: A sparse/hyper, B bitmap/full.  GB_FLIPPED selects
// whether the template applies fmult(x,y) or fmult(y,x), at compile time.
GrB_Info GB (_AemultB_02__lt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__lt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where C is bitmap, with optional (possibly complemented) mask.
GrB_Info GB (_AemultB_bitmap__lt_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for every entry present in B (per the bitmap Bb),
// binding the scalar x as the first operand of LT.
GrB_Info GB (_bind1st__lt_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
    // typed views of the untyped array/scalar inputs
    bool *restrict Cz = (bool *) Cx_output ;
    const uint8_t xscalar = (*((uint8_t *) x_input)) ;
    uint8_t *Bvals = (uint8_t *) Bx_input ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t pB = 0 ; pB < bnz ; pB++)
    {
        // entries absent from the bitmap are left untouched
        if (GBB (Bb, pB))
        {
            uint8_t bval = GBX (Bvals, pB, false) ;
            Cz [pB] = (xscalar < bval) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for every entry present in A (per the bitmap Ab),
// binding the scalar y as the second operand of LT.
GrB_Info GB (_bind2nd__lt_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
    // typed views of the untyped array/scalar inputs
    bool *restrict Cz = (bool *) Cx_output ;
    uint8_t *Avals = (uint8_t *) Ax_input ;
    const uint8_t yscalar = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t pA = 0 ; pA < anz ; pA++)
    {
        // entries absent from the bitmap are left untouched
        if (GBB (Ab, pA))
        {
            uint8_t aval = GBX (Avals, pA, false) ;
            Cz [pA] = (aval < yscalar) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply LT with x bound as the first
// operand.  The per-entry work is done by GB_CAST_OP (defined just above)
// inside the transpose template.
GrB_Info GB (_bind1st_tran__lt_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (generated boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply LT with y bound as the second
// operand; the per-entry work is GB_CAST_OP (defined just above).
GrB_Info GB (_bind2nd_tran__lt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__second_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_fp64)
// A.*B function (eWiseMult):         GB (_AemultB_01__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fp64)
// A*D function (colscale): GB (_AxD__second_fp64)
// D*A function (rowscale): GB (_DxB__second_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__second_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__second_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fp64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB (_bind2nd__second_fp64)
// C=A'+scalar GB (_bind2nd_tran__second_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = bij
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FP64 || GxB_NO_SECOND_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the work is the expansion of
// the shared template below, specialized by this file's GB_* macros.
GrB_Info GB (_Cdense_ewise3_noaccum__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse, accumulated with SECOND.
GrB_Info GB (_Cdense_accumB__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar, accumulated with SECOND.
GrB_Info GB (_Cdense_accumb__second_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns (generated boilerplate)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D where D is diagonal (column scaling of A).
GrB_Info GB (_AxD__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// SECOND on fp64 produces a double result, so C->x is double
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B where D is diagonal (row scaling of B).
GrB_Info GB (_DxB__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// SECOND on fp64 produces a double result, so C->x is double
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the SECOND operator.
GrB_Info GB (_AaddB__second_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspace, freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult method 01: C is sparse/hypersparse, with optional mask M.
GrB_Info GB (_AemultB_01__second_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: A sparse/hyper, B bitmap/full.  GB_FLIPPED selects
// whether the template applies fmult(x,y) or fmult(y,x), at compile time.
GrB_Info GB (_AemultB_02__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_03__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where C is bitmap, with optional (possibly complemented) mask.
GrB_Info GB (_AemultB_bitmap__second_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// SECOND(aij,y) == y, so every entry of A that is present (per the bitmap
// Ab) simply receives the bound scalar y; Ax is read only for its pattern.
GrB_Info GB (_bind2nd__second_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
    // typed views of the untyped array/scalar inputs
    double *restrict Cz = (double *) Cx_output ;
    double *Avals = (double *) Ax_input ;   // unused values: SECOND ignores aij
    const double yscalar = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t pA = 0 ; pA < anz ; pA++)
    {
        // entries absent from the bitmap are left untouched
        if (GBB (Ab, pA))
        {
            Cz [pA] = yscalar ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
// C = op (A', y): transpose A and apply SECOND with y bound as the second
// operand; the per-entry work is GB_CAST_OP (defined just above), which
// stores y and ignores aij.
GrB_Info GB (_bind2nd_tran__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
exp5_omp.c | #include <stdio.h>
#include <omp.h>
int main()
{
    // Number of iterations to distribute across the thread team.
    int i, n = 10;
    #pragma omp parallel shared(n) private(i)
    {
        // Worksharing loop: the n iterations are divided among the threads.
        // Fix: the bound was hard-coded as 10 even though n is declared and
        // explicitly shared for this purpose; use n so the two stay in sync.
        #pragma omp for
        for (i = 0; i < n; i++) {
            printf("Thread %d executes iteration %d\n", omp_get_thread_num(), i);
        }
    }
    printf("\n");
    return 0;
}
|
rhs4sgcurv.c | // SW4 LICENSE
// # ----------------------------------------------------------------------
// # SW4 - Seismic Waves, 4th order
// # ----------------------------------------------------------------------
// # Copyright (c) 2013, Lawrence Livermore National Security, LLC.
// # Produced at the Lawrence Livermore National Laboratory.
// #
// # Written by:
// # N. Anders Petersson (petersson1@llnl.gov)
// # Bjorn Sjogreen (sjogreen2@llnl.gov)
// #
// # LLNL-CODE-643337
// #
// # All rights reserved.
// #
// # This file is part of SW4, Version: 1.0
// #
// # Please also read LICENCE.txt, which contains "Our Notice and GNU General Public License"
// #
// # This program is free software; you can redistribute it and/or modify
// # it under the terms of the GNU General Public License (as published by
// # the Free Software Foundation) version 2, dated June 1991.
// #
// # This program is distributed in the hope that it will be useful, but
// # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
// # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
// # conditions of the GNU General Public License for more details.
// #
// # You should have received a copy of the GNU General Public License
// # along with this program; if not, write to the Free Software
// # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
#include "sw4.h"
void rhs4sgcurv( int ifirst, int ilast, int jfirst, int jlast, int kfirst, int klast,
float_sw4* __restrict__ a_u, float_sw4* __restrict__ a_mu, float_sw4* __restrict__ a_lambda,
float_sw4* __restrict__ a_met, float_sw4* __restrict__ a_jac, float_sw4* __restrict__ a_lu,
int* onesided, float_sw4* __restrict__ a_acof, float_sw4* __restrict__ a_bope,
float_sw4* __restrict__ a_ghcof, float_sw4* __restrict__ a_strx, float_sw4* __restrict__ a_stry )
{
// subroutine CURVILINEAR4SG( ifirst, ilast, jfirst, jlast, kfirst,
// * klast, u, mu, la, met, jac, lu,
// * onesided, acof, bope, ghcof, strx, stry,
// * op )
// Routine with supergrid stretchings strx and stry. No stretching
// in z, since top is always topography, and bottom always interface
// to a deeper Cartesian grid.
// opcount:
// Interior (k>6), 2126 arithmetic ops.
// Boundary discretization (1<=k<=6 ), 6049 arithmetic ops.
const float_sw4 a1 = 0;
const float_sw4 i6 = 1.0/6;
const float_sw4 tf = 0.75;
const float_sw4 c1 = 2.0/3;
const float_sw4 c2 = -1.0/12;
const int ni = ilast-ifirst+1;
const int nij = ni*(jlast-jfirst+1);
const int base = -(ifirst+ni*jfirst+nij*kfirst);
const int base3 = 3*base-1;
const int base4 = 4*base-1;
const int ni3 = 3*ni;
const int nij3 = 3*nij;
const int ni4 = 4*ni;
const int nij4 = 4*nij;
const int ifirst0 = ifirst;
const int jfirst0 = jfirst;
// Direct reuse of fortran code by these macro definitions:
#define mu(i,j,k) a_mu[base+i+ni*(j)+nij*(k)]
#define la(i,j,k) a_lambda[base+i+ni*(j)+nij*(k)]
#define jac(i,j,k) a_jac[base+i+ni*(j)+nij*(k)]
#define u(c,i,j,k) a_u[base3+(c)+3*(i)+ni3*(j)+nij3*(k)]
#define lu(c,i,j,k) a_lu[base3+(c)+3*(i)+ni3*(j)+nij3*(k)]
#define met(c,i,j,k) a_met[base4+(c)+4*(i)+ni4*(j)+nij4*(k)]
#define strx(i) a_strx[i-ifirst0]
#define stry(j) a_stry[j-jfirst0]
#define acof(i,j,k) a_acof[(i-1)+6*(j-1)+48*(k-1)]
#define bope(i,j) a_bope[i-1+6*(j-1)]
#define ghcof(i) a_ghcof[i-1]
#pragma omp parallel
{
int kstart = kfirst+2;
if( onesided[4] == 1 )
{
kstart = 7;
// SBP Boundary closure terms
#pragma omp for
for( int k= 1; k <= 6 ; k++ )
for( int j=jfirst+2; j <= jlast-2 ; j++ )
#pragma simd
#pragma ivdep
for( int i=ifirst+2; i <= ilast-2 ; i++ )
{
// 5 ops
float_sw4 ijac = strx(i)*stry(j)/jac(i,j,k);
float_sw4 istry = 1/(stry(j));
float_sw4 istrx = 1/(strx(i));
float_sw4 istrxy = istry*istrx;
float_sw4 r1 = 0,r2 = 0,r3 = 0;
// pp derivative (u) (u-eq)
// 53 ops, tot=58
float_sw4 cof1=(2*mu(i-2,j,k)+la(i-2,j,k))*met(1,i-2,j,k)*met(1,i-2,j,k)
*strx(i-2);
float_sw4 cof2=(2*mu(i-1,j,k)+la(i-1,j,k))*met(1,i-1,j,k)*met(1,i-1,j,k)
*strx(i-1);
float_sw4 cof3=(2*mu(i,j,k)+la(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*strx(i);
float_sw4 cof4=(2*mu(i+1,j,k)+la(i+1,j,k))*met(1,i+1,j,k)*met(1,i+1,j,k)
*strx(i+1);
float_sw4 cof5=(2*mu(i+2,j,k)+la(i+2,j,k))*met(1,i+2,j,k)*met(1,i+2,j,k)
*strx(i+2);
float_sw4 mux1 = cof2 -tf*(cof3+cof1);
float_sw4 mux2 = cof1 + cof4+3*(cof3+cof2);
float_sw4 mux3 = cof2 + cof5+3*(cof4+cof3);
float_sw4 mux4 = cof4-tf*(cof3+cof5);
r1 = r1 + i6* (
mux1*(u(1,i-2,j,k)-u(1,i,j,k)) +
mux2*(u(1,i-1,j,k)-u(1,i,j,k)) +
mux3*(u(1,i+1,j,k)-u(1,i,j,k)) +
mux4*(u(1,i+2,j,k)-u(1,i,j,k)) )*istry;
// qq derivative (u) (u-eq)
// 43 ops, tot=101
cof1=(mu(i,j-2,k))*met(1,i,j-2,k)*met(1,i,j-2,k)*stry(j-2);
cof2=(mu(i,j-1,k))*met(1,i,j-1,k)*met(1,i,j-1,k)*stry(j-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*stry(j);
cof4=(mu(i,j+1,k))*met(1,i,j+1,k)*met(1,i,j+1,k)*stry(j+1);
cof5=(mu(i,j+2,k))*met(1,i,j+2,k)*met(1,i,j+2,k)*stry(j+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r1 = r1 + i6* (
mux1*(u(1,i,j-2,k)-u(1,i,j,k)) +
mux2*(u(1,i,j-1,k)-u(1,i,j,k)) +
mux3*(u(1,i,j+1,k)-u(1,i,j,k)) +
mux4*(u(1,i,j+2,k)-u(1,i,j,k)) )*istrx;
// pp derivative (v) (v-eq)
// 43 ops, tot=144
cof1=(mu(i-2,j,k))*met(1,i-2,j,k)*met(1,i-2,j,k)*strx(i-2);
cof2=(mu(i-1,j,k))*met(1,i-1,j,k)*met(1,i-1,j,k)*strx(i-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*strx(i);
cof4=(mu(i+1,j,k))*met(1,i+1,j,k)*met(1,i+1,j,k)*strx(i+1);
cof5=(mu(i+2,j,k))*met(1,i+2,j,k)*met(1,i+2,j,k)*strx(i+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 = r2 + i6* (
mux1*(u(2,i-2,j,k)-u(2,i,j,k)) +
mux2*(u(2,i-1,j,k)-u(2,i,j,k)) +
mux3*(u(2,i+1,j,k)-u(2,i,j,k)) +
mux4*(u(2,i+2,j,k)-u(2,i,j,k)) )*istry;
// qq derivative (v) (v-eq)
// 53 ops, tot=197
cof1=(2*mu(i,j-2,k)+la(i,j-2,k))*met(1,i,j-2,k)*met(1,i,j-2,k)*stry(j-2);
cof2=(2*mu(i,j-1,k)+la(i,j-1,k))*met(1,i,j-1,k)*met(1,i,j-1,k)*stry(j-1);
cof3=(2*mu(i,j,k)+la(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*stry(j);
cof4=(2*mu(i,j+1,k)+la(i,j+1,k))*met(1,i,j+1,k)*met(1,i,j+1,k)*stry(j+1);
cof5=(2*mu(i,j+2,k)+la(i,j+2,k))*met(1,i,j+2,k)*met(1,i,j+2,k)*stry(j+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 = r2 + i6* (
mux1*(u(2,i,j-2,k)-u(2,i,j,k)) +
mux2*(u(2,i,j-1,k)-u(2,i,j,k)) +
mux3*(u(2,i,j+1,k)-u(2,i,j,k)) +
mux4*(u(2,i,j+2,k)-u(2,i,j,k)) )*istrx;
// pp derivative (w) (w-eq)
// 43 ops, tot=240
cof1=(mu(i-2,j,k))*met(1,i-2,j,k)*met(1,i-2,j,k)*strx(i-2);
cof2=(mu(i-1,j,k))*met(1,i-1,j,k)*met(1,i-1,j,k)*strx(i-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*strx(i);
cof4=(mu(i+1,j,k))*met(1,i+1,j,k)*met(1,i+1,j,k)*strx(i+1);
cof5=(mu(i+2,j,k))*met(1,i+2,j,k)*met(1,i+2,j,k)*strx(i+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 = r3 + i6* (
mux1*(u(3,i-2,j,k)-u(3,i,j,k)) +
mux2*(u(3,i-1,j,k)-u(3,i,j,k)) +
mux3*(u(3,i+1,j,k)-u(3,i,j,k)) +
mux4*(u(3,i+2,j,k)-u(3,i,j,k)) )*istry;
// qq derivative (w) (w-eq)
// 43 ops, tot=283
cof1=(mu(i,j-2,k))*met(1,i,j-2,k)*met(1,i,j-2,k)*stry(j-2);
cof2=(mu(i,j-1,k))*met(1,i,j-1,k)*met(1,i,j-1,k)*stry(j-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*stry(j);
cof4=(mu(i,j+1,k))*met(1,i,j+1,k)*met(1,i,j+1,k)*stry(j+1);
cof5=(mu(i,j+2,k))*met(1,i,j+2,k)*met(1,i,j+2,k)*stry(j+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 = r3 + i6* (
mux1*(u(3,i,j-2,k)-u(3,i,j,k)) +
mux2*(u(3,i,j-1,k)-u(3,i,j,k)) +
mux3*(u(3,i,j+1,k)-u(3,i,j,k)) +
mux4*(u(3,i,j+2,k)-u(3,i,j,k)) )*istrx;
// All rr-derivatives at once
// averaging the coefficient
// 54*8*8+25*8 = 3656 ops, tot=3939
float_sw4 mucofu2, mucofuv, mucofuw, mucofvw, mucofv2, mucofw2;
for( int q=1 ; q <= 8 ; q++ )
{
mucofu2=0;
mucofuv=0;
mucofuw=0;
mucofvw=0;
mucofv2=0;
mucofw2=0;
for( int m=1 ; m <= 8 ; m++ )
{
mucofu2 +=
acof(k,q,m)*(
(2*mu(i,j,m)+la(i,j,m) )*met(2,i,j,m)*strx(i)*met(2,i,j,m)*strx(i)
+ mu(i,j,m)*(met(3,i,j,m)*stry(j)*met(3,i,j,m)*stry(j)+
met(4,i,j,m)*met(4,i,j,m) )
);
mucofv2 +=
acof(k,q,m)*(
(2*mu(i,j,m)+la(i,j,m) )*met(3,i,j,m)*stry(j)*met(3,i,j,m)*stry(j)
+ mu(i,j,m)*(met(2,i,j,m)*strx(i)*met(2,i,j,m)*strx(i)+
met(4,i,j,m)*met(4,i,j,m) )
);
mucofw2 +=
acof(k,q,m)*((2*mu(i,j,m)+la(i,j,m))*met(4,i,j,m)*met(4,i,j,m)
+ mu(i,j,m)*( met(2,i,j,m)*strx(i)*met(2,i,j,m)*strx(i)+
met(3,i,j,m)*stry(j)*met(3,i,j,m)*stry(j) ) );
mucofuv += acof(k,q,m)*(mu(i,j,m)+la(i,j,m))*met(2,i,j,m)*met(3,i,j,m);
mucofuw += acof(k,q,m)*(mu(i,j,m)+la(i,j,m))*met(2,i,j,m)*met(4,i,j,m);
mucofvw += acof(k,q,m)*(mu(i,j,m)+la(i,j,m))*met(3,i,j,m)*met(4,i,j,m);
}
// Computing the second derivative,
r1 += istrxy*mucofu2*u(1,i,j,q) + mucofuv*u(2,i,j,q) + istry*mucofuw*u(3,i,j,q);
r2 += mucofuv*u(1,i,j,q) + istrxy*mucofv2*u(2,i,j,q) + istrx*mucofvw*u(3,i,j,q);
r3 += istry*mucofuw*u(1,i,j,q) + istrx*mucofvw*u(2,i,j,q) + istrxy*mucofw2*u(3,i,j,q);
}
// Ghost point values, only nonzero for k=1.
// 72 ops., tot=4011
mucofu2 = ghcof(k)*((2*mu(i,j,1)+la(i,j,1))*
met(2,i,j,1)*strx(i)*met(2,i,j,1)*strx(i)
+ mu(i,j,1)*(met(3,i,j,1)*stry(j)*met(3,i,j,1)*stry(j)+
met(4,i,j,1)*met(4,i,j,1) ));
mucofv2 = ghcof(k)*((2*mu(i,j,1)+la(i,j,1))*
met(3,i,j,1)*stry(j)*met(3,i,j,1)*stry(j)
+ mu(i,j,1)*( met(2,i,j,1)*strx(i)*met(2,i,j,1)*strx(i)+
met(4,i,j,1)*met(4,i,j,1) ) );
mucofw2 = ghcof(k)*((2*mu(i,j,1)+la(i,j,1))*met(4,i,j,1)*met(4,i,j,1)
+ mu(i,j,1)*
( met(2,i,j,1)*strx(i)*met(2,i,j,1)*strx(i)+
met(3,i,j,1)*stry(j)*met(3,i,j,1)*stry(j) ) );
mucofuv = ghcof(k)*(mu(i,j,1)+la(i,j,1))*met(2,i,j,1)*met(3,i,j,1);
mucofuw = ghcof(k)*(mu(i,j,1)+la(i,j,1))*met(2,i,j,1)*met(4,i,j,1);
mucofvw = ghcof(k)*(mu(i,j,1)+la(i,j,1))*met(3,i,j,1)*met(4,i,j,1);
r1 += istrxy*mucofu2*u(1,i,j,0) + mucofuv*u(2,i,j,0) + istry*mucofuw*u(3,i,j,0);
r2 += mucofuv*u(1,i,j,0) + istrxy*mucofv2*u(2,i,j,0) + istrx*mucofvw*u(3,i,j,0);
r3 += istry*mucofuw*u(1,i,j,0) + istrx*mucofvw*u(2,i,j,0) + istrxy*mucofw2*u(3,i,j,0);
// pq-derivatives (u-eq)
// 38 ops., tot=4049
r1 +=
c2*( mu(i,j+2,k)*met(1,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(2,i+2,j+2,k)-u(2,i-2,j+2,k)) +
c1*(u(2,i+1,j+2,k)-u(2,i-1,j+2,k)) )
- mu(i,j-2,k)*met(1,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(2,i+2,j-2,k)-u(2,i-2,j-2,k))+
c1*(u(2,i+1,j-2,k)-u(2,i-1,j-2,k)) )
) +
c1*( mu(i,j+1,k)*met(1,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(2,i+2,j+1,k)-u(2,i-2,j+1,k)) +
c1*(u(2,i+1,j+1,k)-u(2,i-1,j+1,k)) )
- mu(i,j-1,k)*met(1,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(2,i+2,j-1,k)-u(2,i-2,j-1,k)) +
c1*(u(2,i+1,j-1,k)-u(2,i-1,j-1,k))));
// qp-derivatives (u-eq)
// 38 ops. tot=4087
r1 +=
c2*( la(i+2,j,k)*met(1,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(2,i+2,j+2,k)-u(2,i+2,j-2,k)) +
c1*(u(2,i+2,j+1,k)-u(2,i+2,j-1,k)) )
- la(i-2,j,k)*met(1,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(2,i-2,j+2,k)-u(2,i-2,j-2,k))+
c1*(u(2,i-2,j+1,k)-u(2,i-2,j-1,k)) )
) +
c1*( la(i+1,j,k)*met(1,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(2,i+1,j+2,k)-u(2,i+1,j-2,k)) +
c1*(u(2,i+1,j+1,k)-u(2,i+1,j-1,k)) )
- la(i-1,j,k)*met(1,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(2,i-1,j+2,k)-u(2,i-1,j-2,k)) +
c1*(u(2,i-1,j+1,k)-u(2,i-1,j-1,k))));
// pq-derivatives (v-eq)
// 38 ops. , tot=4125
r2 +=
c2*( la(i,j+2,k)*met(1,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(1,i+2,j+2,k)-u(1,i-2,j+2,k)) +
c1*(u(1,i+1,j+2,k)-u(1,i-1,j+2,k)) )
- la(i,j-2,k)*met(1,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(1,i+2,j-2,k)-u(1,i-2,j-2,k))+
c1*(u(1,i+1,j-2,k)-u(1,i-1,j-2,k)) )
) +
c1*( la(i,j+1,k)*met(1,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(1,i+2,j+1,k)-u(1,i-2,j+1,k)) +
c1*(u(1,i+1,j+1,k)-u(1,i-1,j+1,k)) )
- la(i,j-1,k)*met(1,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(1,i+2,j-1,k)-u(1,i-2,j-1,k)) +
c1*(u(1,i+1,j-1,k)-u(1,i-1,j-1,k))));
//* qp-derivatives (v-eq)
// 38 ops., tot=4163
r2 +=
c2*( mu(i+2,j,k)*met(1,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(1,i+2,j+2,k)-u(1,i+2,j-2,k)) +
c1*(u(1,i+2,j+1,k)-u(1,i+2,j-1,k)) )
- mu(i-2,j,k)*met(1,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(1,i-2,j+2,k)-u(1,i-2,j-2,k))+
c1*(u(1,i-2,j+1,k)-u(1,i-2,j-1,k)) )
) +
c1*( mu(i+1,j,k)*met(1,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(1,i+1,j+2,k)-u(1,i+1,j-2,k)) +
c1*(u(1,i+1,j+1,k)-u(1,i+1,j-1,k)) )
- mu(i-1,j,k)*met(1,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(1,i-1,j+2,k)-u(1,i-1,j-2,k)) +
c1*(u(1,i-1,j+1,k)-u(1,i-1,j-1,k))));
// rp - derivatives
// 24*8 = 192 ops, tot=4355
float_sw4 dudrm2 = 0, dudrm1=0, dudrp1=0, dudrp2=0;
float_sw4 dvdrm2 = 0, dvdrm1=0, dvdrp1=0, dvdrp2=0;
float_sw4 dwdrm2 = 0, dwdrm1=0, dwdrp1=0, dwdrp2=0;
for( int q=1 ; q <= 8 ; q++ )
{
dudrm2 += bope(k,q)*u(1,i-2,j,q);
dvdrm2 += bope(k,q)*u(2,i-2,j,q);
dwdrm2 += bope(k,q)*u(3,i-2,j,q);
dudrm1 += bope(k,q)*u(1,i-1,j,q);
dvdrm1 += bope(k,q)*u(2,i-1,j,q);
dwdrm1 += bope(k,q)*u(3,i-1,j,q);
dudrp2 += bope(k,q)*u(1,i+2,j,q);
dvdrp2 += bope(k,q)*u(2,i+2,j,q);
dwdrp2 += bope(k,q)*u(3,i+2,j,q);
dudrp1 += bope(k,q)*u(1,i+1,j,q);
dvdrp1 += bope(k,q)*u(2,i+1,j,q);
dwdrp1 += bope(k,q)*u(3,i+1,j,q);
}
// rp derivatives (u-eq)
// 67 ops, tot=4422
r1 += ( c2*(
(2*mu(i+2,j,k)+la(i+2,j,k))*met(2,i+2,j,k)*met(1,i+2,j,k)*
strx(i+2)*dudrp2
+ la(i+2,j,k)*met(3,i+2,j,k)*met(1,i+2,j,k)*dvdrp2*stry(j)
+ la(i+2,j,k)*met(4,i+2,j,k)*met(1,i+2,j,k)*dwdrp2
-((2*mu(i-2,j,k)+la(i-2,j,k))*met(2,i-2,j,k)*met(1,i-2,j,k)*
strx(i-2)*dudrm2
+ la(i-2,j,k)*met(3,i-2,j,k)*met(1,i-2,j,k)*dvdrm2*stry(j)
+ la(i-2,j,k)*met(4,i-2,j,k)*met(1,i-2,j,k)*dwdrm2 )
) + c1*(
(2*mu(i+1,j,k)+la(i+1,j,k))*met(2,i+1,j,k)*met(1,i+1,j,k)*
strx(i+1)*dudrp1
+ la(i+1,j,k)*met(3,i+1,j,k)*met(1,i+1,j,k)*dvdrp1*stry(j)
+ la(i+1,j,k)*met(4,i+1,j,k)*met(1,i+1,j,k)*dwdrp1
-((2*mu(i-1,j,k)+la(i-1,j,k))*met(2,i-1,j,k)*met(1,i-1,j,k)*
strx(i-1)*dudrm1
+ la(i-1,j,k)*met(3,i-1,j,k)*met(1,i-1,j,k)*dvdrm1*stry(j)
+ la(i-1,j,k)*met(4,i-1,j,k)*met(1,i-1,j,k)*dwdrm1 ) ) )*istry;
// rp derivatives (v-eq)
// 42 ops, tot=4464
r2 += c2*(
mu(i+2,j,k)*met(3,i+2,j,k)*met(1,i+2,j,k)*dudrp2
+ mu(i+2,j,k)*met(2,i+2,j,k)*met(1,i+2,j,k)*dvdrp2*
strx(i+2)*istry
- (mu(i-2,j,k)*met(3,i-2,j,k)*met(1,i-2,j,k)*dudrm2
+ mu(i-2,j,k)*met(2,i-2,j,k)*met(1,i-2,j,k)*dvdrm2*
strx(i-2)*istry )
) + c1*(
mu(i+1,j,k)*met(3,i+1,j,k)*met(1,i+1,j,k)*dudrp1
+ mu(i+1,j,k)*met(2,i+1,j,k)*met(1,i+1,j,k)*dvdrp1*
strx(i+1)*istry
- (mu(i-1,j,k)*met(3,i-1,j,k)*met(1,i-1,j,k)*dudrm1
+ mu(i-1,j,k)*met(2,i-1,j,k)*met(1,i-1,j,k)*dvdrm1*
strx(i-1)*istry )
);
// rp derivatives (w-eq)
// 38 ops, tot=4502
r3 += istry*(c2*(
mu(i+2,j,k)*met(4,i+2,j,k)*met(1,i+2,j,k)*dudrp2
+ mu(i+2,j,k)*met(2,i+2,j,k)*met(1,i+2,j,k)*dwdrp2*strx(i+2)
- (mu(i-2,j,k)*met(4,i-2,j,k)*met(1,i-2,j,k)*dudrm2
+ mu(i-2,j,k)*met(2,i-2,j,k)*met(1,i-2,j,k)*dwdrm2*strx(i-2))
) + c1*(
mu(i+1,j,k)*met(4,i+1,j,k)*met(1,i+1,j,k)*dudrp1
+ mu(i+1,j,k)*met(2,i+1,j,k)*met(1,i+1,j,k)*dwdrp1*strx(i+1)
- (mu(i-1,j,k)*met(4,i-1,j,k)*met(1,i-1,j,k)*dudrm1
+ mu(i-1,j,k)*met(2,i-1,j,k)*met(1,i-1,j,k)*dwdrm1*strx(i-1))
) );
// rq - derivatives
// 24*8 = 192 ops , tot=4694
dudrm2 = 0;
dudrm1 = 0;
dudrp1 = 0;
dudrp2 = 0;
dvdrm2 = 0;
dvdrm1 = 0;
dvdrp1 = 0;
dvdrp2 = 0;
dwdrm2 = 0;
dwdrm1 = 0;
dwdrp1 = 0;
dwdrp2 = 0;
for( int q=1 ; q <= 8 ; q++ )
{
dudrm2 += bope(k,q)*u(1,i,j-2,q);
dvdrm2 += bope(k,q)*u(2,i,j-2,q);
dwdrm2 += bope(k,q)*u(3,i,j-2,q);
dudrm1 += bope(k,q)*u(1,i,j-1,q);
dvdrm1 += bope(k,q)*u(2,i,j-1,q);
dwdrm1 += bope(k,q)*u(3,i,j-1,q);
dudrp2 += bope(k,q)*u(1,i,j+2,q);
dvdrp2 += bope(k,q)*u(2,i,j+2,q);
dwdrp2 += bope(k,q)*u(3,i,j+2,q);
dudrp1 += bope(k,q)*u(1,i,j+1,q);
dvdrp1 += bope(k,q)*u(2,i,j+1,q);
dwdrp1 += bope(k,q)*u(3,i,j+1,q);
}
// rq derivatives (u-eq)
// 42 ops, tot=4736
r1 += c2*(
mu(i,j+2,k)*met(3,i,j+2,k)*met(1,i,j+2,k)*dudrp2*
stry(j+2)*istrx
+ mu(i,j+2,k)*met(2,i,j+2,k)*met(1,i,j+2,k)*dvdrp2
- (mu(i,j-2,k)*met(3,i,j-2,k)*met(1,i,j-2,k)*dudrm2*
stry(j-2)*istrx
+ mu(i,j-2,k)*met(2,i,j-2,k)*met(1,i,j-2,k)*dvdrm2)
) + c1*(
mu(i,j+1,k)*met(3,i,j+1,k)*met(1,i,j+1,k)*dudrp1*
stry(j+1)*istrx
+ mu(i,j+1,k)*met(2,i,j+1,k)*met(1,i,j+1,k)*dvdrp1
- (mu(i,j-1,k)*met(3,i,j-1,k)*met(1,i,j-1,k)*dudrm1*
stry(j-1)*istrx
+ mu(i,j-1,k)*met(2,i,j-1,k)*met(1,i,j-1,k)*dvdrm1)
);
// rq derivatives (v-eq)
// 70 ops, tot=4806
r2 += c2*(
la(i,j+2,k)*met(2,i,j+2,k)*met(1,i,j+2,k)*dudrp2
+(2*mu(i,j+2,k)+la(i,j+2,k))*met(3,i,j+2,k)*met(1,i,j+2,k)*dvdrp2
*stry(j+2)*istrx
+ la(i,j+2,k)*met(4,i,j+2,k)*met(1,i,j+2,k)*dwdrp2*istrx
- ( la(i,j-2,k)*met(2,i,j-2,k)*met(1,i,j-2,k)*dudrm2
+(2*mu(i,j-2,k)+la(i,j-2,k))*met(3,i,j-2,k)*met(1,i,j-2,k)*dvdrm2
*stry(j-2)*istrx
+ la(i,j-2,k)*met(4,i,j-2,k)*met(1,i,j-2,k)*dwdrm2*istrx )
) + c1*(
la(i,j+1,k)*met(2,i,j+1,k)*met(1,i,j+1,k)*dudrp1
+(2*mu(i,j+1,k)+la(i,j+1,k))*met(3,i,j+1,k)*met(1,i,j+1,k)*dvdrp1
*stry(j+1)*istrx
+ la(i,j+1,k)*met(4,i,j+1,k)*met(1,i,j+1,k)*dwdrp1*istrx
- ( la(i,j-1,k)*met(2,i,j-1,k)*met(1,i,j-1,k)*dudrm1
+(2*mu(i,j-1,k)+la(i,j-1,k))*met(3,i,j-1,k)*met(1,i,j-1,k)*dvdrm1
*stry(j-1)*istrx
+ la(i,j-1,k)*met(4,i,j-1,k)*met(1,i,j-1,k)*dwdrm1*istrx )
);
// rq derivatives (w-eq)
// 39 ops, tot=4845
r3 += ( c2*(
mu(i,j+2,k)*met(3,i,j+2,k)*met(1,i,j+2,k)*dwdrp2*stry(j+2)
+ mu(i,j+2,k)*met(4,i,j+2,k)*met(1,i,j+2,k)*dvdrp2
- (mu(i,j-2,k)*met(3,i,j-2,k)*met(1,i,j-2,k)*dwdrm2*stry(j-2)
+ mu(i,j-2,k)*met(4,i,j-2,k)*met(1,i,j-2,k)*dvdrm2)
) + c1*(
mu(i,j+1,k)*met(3,i,j+1,k)*met(1,i,j+1,k)*dwdrp1*stry(j+1)
+ mu(i,j+1,k)*met(4,i,j+1,k)*met(1,i,j+1,k)*dvdrp1
- (mu(i,j-1,k)*met(3,i,j-1,k)*met(1,i,j-1,k)*dwdrm1*stry(j-1)
+ mu(i,j-1,k)*met(4,i,j-1,k)*met(1,i,j-1,k)*dvdrm1)
) )*istrx;
// pr and qr derivatives at once
// in loop: 8*(53+53+43) = 1192 ops, tot=6037
for( int q=1 ; q <= 8 ; q++ )
{
// (u-eq)
// 53 ops
r1 += bope(k,q)*(
// pr
(2*mu(i,j,q)+la(i,j,q))*met(2,i,j,q)*met(1,i,j,q)*(
c2*(u(1,i+2,j,q)-u(1,i-2,j,q)) +
c1*(u(1,i+1,j,q)-u(1,i-1,j,q)) )*strx(i)*istry
+ mu(i,j,q)*met(3,i,j,q)*met(1,i,j,q)*(
c2*(u(2,i+2,j,q)-u(2,i-2,j,q)) +
c1*(u(2,i+1,j,q)-u(2,i-1,j,q)) )
+ mu(i,j,q)*met(4,i,j,q)*met(1,i,j,q)*(
c2*(u(3,i+2,j,q)-u(3,i-2,j,q)) +
c1*(u(3,i+1,j,q)-u(3,i-1,j,q)) )*istry
// qr
+ mu(i,j,q)*met(3,i,j,q)*met(1,i,j,q)*(
c2*(u(1,i,j+2,q)-u(1,i,j-2,q)) +
c1*(u(1,i,j+1,q)-u(1,i,j-1,q)) )*stry(j)*istrx
+ la(i,j,q)*met(2,i,j,q)*met(1,i,j,q)*(
c2*(u(2,i,j+2,q)-u(2,i,j-2,q)) +
c1*(u(2,i,j+1,q)-u(2,i,j-1,q)) ) );
// (v-eq)
// 53 ops
r2 += bope(k,q)*(
// pr
la(i,j,q)*met(3,i,j,q)*met(1,i,j,q)*(
c2*(u(1,i+2,j,q)-u(1,i-2,j,q)) +
c1*(u(1,i+1,j,q)-u(1,i-1,j,q)) )
+ mu(i,j,q)*met(2,i,j,q)*met(1,i,j,q)*(
c2*(u(2,i+2,j,q)-u(2,i-2,j,q)) +
c1*(u(2,i+1,j,q)-u(2,i-1,j,q)) )*strx(i)*istry
// qr
+ mu(i,j,q)*met(2,i,j,q)*met(1,i,j,q)*(
c2*(u(1,i,j+2,q)-u(1,i,j-2,q)) +
c1*(u(1,i,j+1,q)-u(1,i,j-1,q)) )
+ (2*mu(i,j,q)+la(i,j,q))*met(3,i,j,q)*met(1,i,j,q)*(
c2*(u(2,i,j+2,q)-u(2,i,j-2,q)) +
c1*(u(2,i,j+1,q)-u(2,i,j-1,q)) )*stry(j)*istrx
+ mu(i,j,q)*met(4,i,j,q)*met(1,i,j,q)*(
c2*(u(3,i,j+2,q)-u(3,i,j-2,q)) +
c1*(u(3,i,j+1,q)-u(3,i,j-1,q)) )*istrx );
// (w-eq)
// 43 ops
r3 += bope(k,q)*(
// pr
la(i,j,q)*met(4,i,j,q)*met(1,i,j,q)*(
c2*(u(1,i+2,j,q)-u(1,i-2,j,q)) +
c1*(u(1,i+1,j,q)-u(1,i-1,j,q)) )*istry
+ mu(i,j,q)*met(2,i,j,q)*met(1,i,j,q)*(
c2*(u(3,i+2,j,q)-u(3,i-2,j,q)) +
c1*(u(3,i+1,j,q)-u(3,i-1,j,q)) )*strx(i)*istry
// qr
+ mu(i,j,q)*met(3,i,j,q)*met(1,i,j,q)*(
c2*(u(3,i,j+2,q)-u(3,i,j-2,q)) +
c1*(u(3,i,j+1,q)-u(3,i,j-1,q)) )*stry(j)*istrx
+ la(i,j,q)*met(4,i,j,q)*met(1,i,j,q)*(
c2*(u(2,i,j+2,q)-u(2,i,j-2,q)) +
c1*(u(2,i,j+1,q)-u(2,i,j-1,q)) )*istrx );
}
// 12 ops, tot=6049
lu(1,i,j,k) = a1*lu(1,i,j,k) + r1*ijac;
lu(2,i,j,k) = a1*lu(2,i,j,k) + r2*ijac;
lu(3,i,j,k) = a1*lu(3,i,j,k) + r3*ijac;
}
}
#pragma omp for
for( int k= kstart; k <= klast-2 ; k++ )
for( int j=jfirst+2; j <= jlast-2 ; j++ )
#pragma simd
#pragma ivdep
for( int i=ifirst+2; i <= ilast-2 ; i++ )
{
// 5 ops
float_sw4 ijac = strx(i)*stry(j)/jac(i,j,k);
float_sw4 istry = 1/(stry(j));
float_sw4 istrx = 1/(strx(i));
float_sw4 istrxy = istry*istrx;
float_sw4 r1 = 0, r2=0, r3=0;
// pp derivative (u)
// 53 ops, tot=58
float_sw4 cof1=(2*mu(i-2,j,k)+la(i-2,j,k))*met(1,i-2,j,k)*met(1,i-2,j,k)
*strx(i-2);
float_sw4 cof2=(2*mu(i-1,j,k)+la(i-1,j,k))*met(1,i-1,j,k)*met(1,i-1,j,k)
*strx(i-1);
float_sw4 cof3=(2*mu(i,j,k)+la(i,j,k))*met(1,i,j,k)*met(1,i,j,k)
*strx(i);
float_sw4 cof4=(2*mu(i+1,j,k)+la(i+1,j,k))*met(1,i+1,j,k)*met(1,i+1,j,k)
*strx(i+1);
float_sw4 cof5=(2*mu(i+2,j,k)+la(i+2,j,k))*met(1,i+2,j,k)*met(1,i+2,j,k)
*strx(i+2);
float_sw4 mux1 = cof2 -tf*(cof3+cof1);
float_sw4 mux2 = cof1 + cof4+3*(cof3+cof2);
float_sw4 mux3 = cof2 + cof5+3*(cof4+cof3);
float_sw4 mux4 = cof4-tf*(cof3+cof5);
r1 += i6* (
mux1*(u(1,i-2,j,k)-u(1,i,j,k)) +
mux2*(u(1,i-1,j,k)-u(1,i,j,k)) +
mux3*(u(1,i+1,j,k)-u(1,i,j,k)) +
mux4*(u(1,i+2,j,k)-u(1,i,j,k)) )*istry;
// qq derivative (u)
// 43 ops, tot=101
cof1=(mu(i,j-2,k))*met(1,i,j-2,k)*met(1,i,j-2,k)*stry(j-2);
cof2=(mu(i,j-1,k))*met(1,i,j-1,k)*met(1,i,j-1,k)*stry(j-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*stry(j);
cof4=(mu(i,j+1,k))*met(1,i,j+1,k)*met(1,i,j+1,k)*stry(j+1);
cof5=(mu(i,j+2,k))*met(1,i,j+2,k)*met(1,i,j+2,k)*stry(j+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r1 += i6* (
mux1*(u(1,i,j-2,k)-u(1,i,j,k)) +
mux2*(u(1,i,j-1,k)-u(1,i,j,k)) +
mux3*(u(1,i,j+1,k)-u(1,i,j,k)) +
mux4*(u(1,i,j+2,k)-u(1,i,j,k)) )*istrx;
// rr derivative (u)
// 5*11+14+14=83 ops, tot=184
cof1 = (2*mu(i,j,k-2)+la(i,j,k-2))*met(2,i,j,k-2)*strx(i)*met(2,i,j,k-2)*strx(i)
+ mu(i,j,k-2)*(met(3,i,j,k-2)*stry(j)*met(3,i,j,k-2)*stry(j)+
met(4,i,j,k-2)*met(4,i,j,k-2));
cof2 = (2*mu(i,j,k-1)+la(i,j,k-1))*met(2,i,j,k-1)*strx(i)*met(2,i,j,k-1)*strx(i)
+ mu(i,j,k-1)*(met(3,i,j,k-1)*stry(j)*met(3,i,j,k-1)*stry(j)+
met(4,i,j,k-1)*met(4,i,j,k-1) );
cof3 = (2*mu(i,j,k)+la(i,j,k))*met(2,i,j,k)*strx(i)*met(2,i,j,k)*strx(i) +
mu(i,j,k)*(met(3,i,j,k)*stry(j)*met(3,i,j,k)*stry(j)+
met(4,i,j,k)*met(4,i,j,k));
cof4 = (2*mu(i,j,k+1)+la(i,j,k+1))*met(2,i,j,k+1)*strx(i)*met(2,i,j,k+1)*strx(i)
+ mu(i,j,k+1)*(met(3,i,j,k+1)*stry(j)*met(3,i,j,k+1)*stry(j)+
met(4,i,j,k+1)*met(4,i,j,k+1));
cof5 = (2*mu(i,j,k+2)+la(i,j,k+2))*met(2,i,j,k+2)*strx(i)*met(2,i,j,k+2)*strx(i)
+ mu(i,j,k+2)*( met(3,i,j,k+2)*stry(j)*met(3,i,j,k+2)*stry(j)+
met(4,i,j,k+2)*met(4,i,j,k+2));
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r1 += i6* (
mux1*(u(1,i,j,k-2)-u(1,i,j,k)) +
mux2*(u(1,i,j,k-1)-u(1,i,j,k)) +
mux3*(u(1,i,j,k+1)-u(1,i,j,k)) +
mux4*(u(1,i,j,k+2)-u(1,i,j,k)) )*istrxy;
// rr derivative (v)
// 42 ops, tot=226
cof1=(mu(i,j,k-2)+la(i,j,k-2))*met(2,i,j,k-2)*met(3,i,j,k-2);
cof2=(mu(i,j,k-1)+la(i,j,k-1))*met(2,i,j,k-1)*met(3,i,j,k-1);
cof3=(mu(i,j,k)+la(i,j,k))*met(2,i,j,k)*met(3,i,j,k);
cof4=(mu(i,j,k+1)+la(i,j,k+1))*met(2,i,j,k+1)*met(3,i,j,k+1);
cof5=(mu(i,j,k+2)+la(i,j,k+2))*met(2,i,j,k+2)*met(3,i,j,k+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r1 += i6* (
mux1*(u(2,i,j,k-2)-u(2,i,j,k)) +
mux2*(u(2,i,j,k-1)-u(2,i,j,k)) +
mux3*(u(2,i,j,k+1)-u(2,i,j,k)) +
mux4*(u(2,i,j,k+2)-u(2,i,j,k)) );
// rr derivative (w)
// 43 ops, tot=269
cof1=(mu(i,j,k-2)+la(i,j,k-2))*met(2,i,j,k-2)*met(4,i,j,k-2);
cof2=(mu(i,j,k-1)+la(i,j,k-1))*met(2,i,j,k-1)*met(4,i,j,k-1);
cof3=(mu(i,j,k)+la(i,j,k))*met(2,i,j,k)*met(4,i,j,k);
cof4=(mu(i,j,k+1)+la(i,j,k+1))*met(2,i,j,k+1)*met(4,i,j,k+1);
cof5=(mu(i,j,k+2)+la(i,j,k+2))*met(2,i,j,k+2)*met(4,i,j,k+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r1 += i6* (
mux1*(u(3,i,j,k-2)-u(3,i,j,k)) +
mux2*(u(3,i,j,k-1)-u(3,i,j,k)) +
mux3*(u(3,i,j,k+1)-u(3,i,j,k)) +
mux4*(u(3,i,j,k+2)-u(3,i,j,k)) )*istry;
// pq-derivatives
// 38 ops, tot=307
r1 +=
c2*( mu(i,j+2,k)*met(1,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(2,i+2,j+2,k)-u(2,i-2,j+2,k)) +
c1*(u(2,i+1,j+2,k)-u(2,i-1,j+2,k)) )
- mu(i,j-2,k)*met(1,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(2,i+2,j-2,k)-u(2,i-2,j-2,k))+
c1*(u(2,i+1,j-2,k)-u(2,i-1,j-2,k)) )
) +
c1*( mu(i,j+1,k)*met(1,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(2,i+2,j+1,k)-u(2,i-2,j+1,k)) +
c1*(u(2,i+1,j+1,k)-u(2,i-1,j+1,k)) )
- mu(i,j-1,k)*met(1,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(2,i+2,j-1,k)-u(2,i-2,j-1,k)) +
c1*(u(2,i+1,j-1,k)-u(2,i-1,j-1,k))));
// qp-derivatives
// 38 ops, tot=345
r1 +=
c2*( la(i+2,j,k)*met(1,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(2,i+2,j+2,k)-u(2,i+2,j-2,k)) +
c1*(u(2,i+2,j+1,k)-u(2,i+2,j-1,k)) )
- la(i-2,j,k)*met(1,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(2,i-2,j+2,k)-u(2,i-2,j-2,k))+
c1*(u(2,i-2,j+1,k)-u(2,i-2,j-1,k)) )
) +
c1*( la(i+1,j,k)*met(1,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(2,i+1,j+2,k)-u(2,i+1,j-2,k)) +
c1*(u(2,i+1,j+1,k)-u(2,i+1,j-1,k)) )
- la(i-1,j,k)*met(1,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(2,i-1,j+2,k)-u(2,i-1,j-2,k)) +
c1*(u(2,i-1,j+1,k)-u(2,i-1,j-1,k))));
// pr-derivatives
// 130 ops., tot=475
r1 += c2*(
(2*mu(i,j,k+2)+la(i,j,k+2))*met(2,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(1,i+2,j,k+2)-u(1,i-2,j,k+2)) +
c1*(u(1,i+1,j,k+2)-u(1,i-1,j,k+2)) )*strx(i)*istry
+ mu(i,j,k+2)*met(3,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(2,i+2,j,k+2)-u(2,i-2,j,k+2)) +
c1*(u(2,i+1,j,k+2)-u(2,i-1,j,k+2)) )
+ mu(i,j,k+2)*met(4,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(3,i+2,j,k+2)-u(3,i-2,j,k+2)) +
c1*(u(3,i+1,j,k+2)-u(3,i-1,j,k+2)) )*istry
- ((2*mu(i,j,k-2)+la(i,j,k-2))*met(2,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(1,i+2,j,k-2)-u(1,i-2,j,k-2)) +
c1*(u(1,i+1,j,k-2)-u(1,i-1,j,k-2)) )*strx(i)*istry
+ mu(i,j,k-2)*met(3,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(2,i+2,j,k-2)-u(2,i-2,j,k-2)) +
c1*(u(2,i+1,j,k-2)-u(2,i-1,j,k-2)) )
+ mu(i,j,k-2)*met(4,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(3,i+2,j,k-2)-u(3,i-2,j,k-2)) +
c1*(u(3,i+1,j,k-2)-u(3,i-1,j,k-2)) )*istry )
) + c1*(
(2*mu(i,j,k+1)+la(i,j,k+1))*met(2,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(1,i+2,j,k+1)-u(1,i-2,j,k+1)) +
c1*(u(1,i+1,j,k+1)-u(1,i-1,j,k+1)) )*strx(i)*istry
+ mu(i,j,k+1)*met(3,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(2,i+2,j,k+1)-u(2,i-2,j,k+1)) +
c1*(u(2,i+1,j,k+1)-u(2,i-1,j,k+1)) )
+ mu(i,j,k+1)*met(4,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(3,i+2,j,k+1)-u(3,i-2,j,k+1)) +
c1*(u(3,i+1,j,k+1)-u(3,i-1,j,k+1)) )*istry
- ((2*mu(i,j,k-1)+la(i,j,k-1))*met(2,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(1,i+2,j,k-1)-u(1,i-2,j,k-1)) +
c1*(u(1,i+1,j,k-1)-u(1,i-1,j,k-1)) )*strx(i)*istry
+ mu(i,j,k-1)*met(3,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(2,i+2,j,k-1)-u(2,i-2,j,k-1)) +
c1*(u(2,i+1,j,k-1)-u(2,i-1,j,k-1)) )
+ mu(i,j,k-1)*met(4,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(3,i+2,j,k-1)-u(3,i-2,j,k-1)) +
c1*(u(3,i+1,j,k-1)-u(3,i-1,j,k-1)) )*istry ) );
// rp derivatives
// 130 ops, tot=605
r1 += ( c2*(
(2*mu(i+2,j,k)+la(i+2,j,k))*met(2,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(1,i+2,j,k+2)-u(1,i+2,j,k-2)) +
c1*(u(1,i+2,j,k+1)-u(1,i+2,j,k-1)) )*strx(i+2)
+ la(i+2,j,k)*met(3,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(2,i+2,j,k+2)-u(2,i+2,j,k-2)) +
c1*(u(2,i+2,j,k+1)-u(2,i+2,j,k-1)) )*stry(j)
+ la(i+2,j,k)*met(4,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(3,i+2,j,k+2)-u(3,i+2,j,k-2)) +
c1*(u(3,i+2,j,k+1)-u(3,i+2,j,k-1)) )
- ((2*mu(i-2,j,k)+la(i-2,j,k))*met(2,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(1,i-2,j,k+2)-u(1,i-2,j,k-2)) +
c1*(u(1,i-2,j,k+1)-u(1,i-2,j,k-1)) )*strx(i-2)
+ la(i-2,j,k)*met(3,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(2,i-2,j,k+2)-u(2,i-2,j,k-2)) +
c1*(u(2,i-2,j,k+1)-u(2,i-2,j,k-1)) )*stry(j)
+ la(i-2,j,k)*met(4,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(3,i-2,j,k+2)-u(3,i-2,j,k-2)) +
c1*(u(3,i-2,j,k+1)-u(3,i-2,j,k-1)) ) )
) + c1*(
(2*mu(i+1,j,k)+la(i+1,j,k))*met(2,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(1,i+1,j,k+2)-u(1,i+1,j,k-2)) +
c1*(u(1,i+1,j,k+1)-u(1,i+1,j,k-1)) )*strx(i+1)
+ la(i+1,j,k)*met(3,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(2,i+1,j,k+2)-u(2,i+1,j,k-2)) +
c1*(u(2,i+1,j,k+1)-u(2,i+1,j,k-1)) )*stry(j)
+ la(i+1,j,k)*met(4,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(3,i+1,j,k+2)-u(3,i+1,j,k-2)) +
c1*(u(3,i+1,j,k+1)-u(3,i+1,j,k-1)) )
- ((2*mu(i-1,j,k)+la(i-1,j,k))*met(2,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(1,i-1,j,k+2)-u(1,i-1,j,k-2)) +
c1*(u(1,i-1,j,k+1)-u(1,i-1,j,k-1)) )*strx(i-1)
+ la(i-1,j,k)*met(3,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(2,i-1,j,k+2)-u(2,i-1,j,k-2)) +
c1*(u(2,i-1,j,k+1)-u(2,i-1,j,k-1)) )*stry(j)
+ la(i-1,j,k)*met(4,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(3,i-1,j,k+2)-u(3,i-1,j,k-2)) +
c1*(u(3,i-1,j,k+1)-u(3,i-1,j,k-1)) ) ) ) )*istry;
// qr derivatives
// 82 ops, tot=687
r1 += c2*(
mu(i,j,k+2)*met(3,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(1,i,j+2,k+2)-u(1,i,j-2,k+2)) +
c1*(u(1,i,j+1,k+2)-u(1,i,j-1,k+2)) )*stry(j)*istrx
+ la(i,j,k+2)*met(2,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(2,i,j+2,k+2)-u(2,i,j-2,k+2)) +
c1*(u(2,i,j+1,k+2)-u(2,i,j-1,k+2)) )
- ( mu(i,j,k-2)*met(3,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(1,i,j+2,k-2)-u(1,i,j-2,k-2)) +
c1*(u(1,i,j+1,k-2)-u(1,i,j-1,k-2)) )*stry(j)*istrx
+ la(i,j,k-2)*met(2,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(2,i,j+2,k-2)-u(2,i,j-2,k-2)) +
c1*(u(2,i,j+1,k-2)-u(2,i,j-1,k-2)) ) )
) + c1*(
mu(i,j,k+1)*met(3,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(1,i,j+2,k+1)-u(1,i,j-2,k+1)) +
c1*(u(1,i,j+1,k+1)-u(1,i,j-1,k+1)) )*stry(j)*istrx
+ la(i,j,k+1)*met(2,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(2,i,j+2,k+1)-u(2,i,j-2,k+1)) +
c1*(u(2,i,j+1,k+1)-u(2,i,j-1,k+1)) )
- ( mu(i,j,k-1)*met(3,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(1,i,j+2,k-1)-u(1,i,j-2,k-1)) +
c1*(u(1,i,j+1,k-1)-u(1,i,j-1,k-1)) )*stry(j)*istrx
+ la(i,j,k-1)*met(2,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(2,i,j+2,k-1)-u(2,i,j-2,k-1)) +
c1*(u(2,i,j+1,k-1)-u(2,i,j-1,k-1)) ) ) );
// rq derivatives
// 82 ops, tot=769
r1 += c2*(
mu(i,j+2,k)*met(3,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(1,i,j+2,k+2)-u(1,i,j+2,k-2)) +
c1*(u(1,i,j+2,k+1)-u(1,i,j+2,k-1)) )*stry(j+2)*istrx
+ mu(i,j+2,k)*met(2,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(2,i,j+2,k+2)-u(2,i,j+2,k-2)) +
c1*(u(2,i,j+2,k+1)-u(2,i,j+2,k-1)) )
- ( mu(i,j-2,k)*met(3,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(1,i,j-2,k+2)-u(1,i,j-2,k-2)) +
c1*(u(1,i,j-2,k+1)-u(1,i,j-2,k-1)) )*stry(j-2)*istrx
+ mu(i,j-2,k)*met(2,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(2,i,j-2,k+2)-u(2,i,j-2,k-2)) +
c1*(u(2,i,j-2,k+1)-u(2,i,j-2,k-1)) ) )
) + c1*(
mu(i,j+1,k)*met(3,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(1,i,j+1,k+2)-u(1,i,j+1,k-2)) +
c1*(u(1,i,j+1,k+1)-u(1,i,j+1,k-1)) )*stry(j+1)*istrx
+ mu(i,j+1,k)*met(2,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(2,i,j+1,k+2)-u(2,i,j+1,k-2)) +
c1*(u(2,i,j+1,k+1)-u(2,i,j+1,k-1)) )
- ( mu(i,j-1,k)*met(3,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(1,i,j-1,k+2)-u(1,i,j-1,k-2)) +
c1*(u(1,i,j-1,k+1)-u(1,i,j-1,k-1)) )*stry(j-1)*istrx
+ mu(i,j-1,k)*met(2,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(2,i,j-1,k+2)-u(2,i,j-1,k-2)) +
c1*(u(2,i,j-1,k+1)-u(2,i,j-1,k-1)) ) ) );
// 4 ops, tot=773
lu(1,i,j,k) = a1*lu(1,i,j,k) + r1*ijac;
// v-equation
// r1 = 0;
// pp derivative (v)
// 43 ops, tot=816
cof1=(mu(i-2,j,k))*met(1,i-2,j,k)*met(1,i-2,j,k)*strx(i-2);
cof2=(mu(i-1,j,k))*met(1,i-1,j,k)*met(1,i-1,j,k)*strx(i-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*strx(i);
cof4=(mu(i+1,j,k))*met(1,i+1,j,k)*met(1,i+1,j,k)*strx(i+1);
cof5=(mu(i+2,j,k))*met(1,i+2,j,k)*met(1,i+2,j,k)*strx(i+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 += i6* (
mux1*(u(2,i-2,j,k)-u(2,i,j,k)) +
mux2*(u(2,i-1,j,k)-u(2,i,j,k)) +
mux3*(u(2,i+1,j,k)-u(2,i,j,k)) +
mux4*(u(2,i+2,j,k)-u(2,i,j,k)) )*istry;
// qq derivative (v)
// 53 ops, tot=869
cof1=(2*mu(i,j-2,k)+la(i,j-2,k))*met(1,i,j-2,k)*met(1,i,j-2,k)
*stry(j-2);
cof2=(2*mu(i,j-1,k)+la(i,j-1,k))*met(1,i,j-1,k)*met(1,i,j-1,k)
*stry(j-1);
cof3=(2*mu(i,j,k)+la(i,j,k))*met(1,i,j,k)*met(1,i,j,k)
*stry(j);
cof4=(2*mu(i,j+1,k)+la(i,j+1,k))*met(1,i,j+1,k)*met(1,i,j+1,k)
*stry(j+1);
cof5=(2*mu(i,j+2,k)+la(i,j+2,k))*met(1,i,j+2,k)*met(1,i,j+2,k)
*stry(j+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 += i6* (
mux1*(u(2,i,j-2,k)-u(2,i,j,k)) +
mux2*(u(2,i,j-1,k)-u(2,i,j,k)) +
mux3*(u(2,i,j+1,k)-u(2,i,j,k)) +
mux4*(u(2,i,j+2,k)-u(2,i,j,k)) )*istrx;
// rr derivative (u)
// 42 ops, tot=911
cof1=(mu(i,j,k-2)+la(i,j,k-2))*met(2,i,j,k-2)*met(3,i,j,k-2);
cof2=(mu(i,j,k-1)+la(i,j,k-1))*met(2,i,j,k-1)*met(3,i,j,k-1);
cof3=(mu(i,j,k)+ la(i,j,k) )*met(2,i,j,k)* met(3,i,j,k);
cof4=(mu(i,j,k+1)+la(i,j,k+1))*met(2,i,j,k+1)*met(3,i,j,k+1);
cof5=(mu(i,j,k+2)+la(i,j,k+2))*met(2,i,j,k+2)*met(3,i,j,k+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 += i6* (
mux1*(u(1,i,j,k-2)-u(1,i,j,k)) +
mux2*(u(1,i,j,k-1)-u(1,i,j,k)) +
mux3*(u(1,i,j,k+1)-u(1,i,j,k)) +
mux4*(u(1,i,j,k+2)-u(1,i,j,k)) );
// rr derivative (v)
// 83 ops, tot=994
cof1 = (2*mu(i,j,k-2)+la(i,j,k-2))*met(3,i,j,k-2)*stry(j)*met(3,i,j,k-2)*stry(j)
+ mu(i,j,k-2)*(met(2,i,j,k-2)*strx(i)*met(2,i,j,k-2)*strx(i)+
met(4,i,j,k-2)*met(4,i,j,k-2));
cof2 = (2*mu(i,j,k-1)+la(i,j,k-1))*met(3,i,j,k-1)*stry(j)*met(3,i,j,k-1)*stry(j)
+ mu(i,j,k-1)*(met(2,i,j,k-1)*strx(i)*met(2,i,j,k-1)*strx(i)+
met(4,i,j,k-1)*met(4,i,j,k-1));
cof3 = (2*mu(i,j,k)+la(i,j,k))*met(3,i,j,k)*stry(j)*met(3,i,j,k)*stry(j) +
mu(i,j,k)*(met(2,i,j,k)*strx(i)*met(2,i,j,k)*strx(i)+
met(4,i,j,k)*met(4,i,j,k));
cof4 = (2*mu(i,j,k+1)+la(i,j,k+1))*met(3,i,j,k+1)*stry(j)*met(3,i,j,k+1)*stry(j)
+ mu(i,j,k+1)*(met(2,i,j,k+1)*strx(i)*met(2,i,j,k+1)*strx(i)+
met(4,i,j,k+1)*met(4,i,j,k+1));
cof5 = (2*mu(i,j,k+2)+la(i,j,k+2))*met(3,i,j,k+2)*stry(j)*met(3,i,j,k+2)*stry(j)
+ mu(i,j,k+2)*(met(2,i,j,k+2)*strx(i)*met(2,i,j,k+2)*strx(i)+
met(4,i,j,k+2)*met(4,i,j,k+2));
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 += i6* (
mux1*(u(2,i,j,k-2)-u(2,i,j,k)) +
mux2*(u(2,i,j,k-1)-u(2,i,j,k)) +
mux3*(u(2,i,j,k+1)-u(2,i,j,k)) +
mux4*(u(2,i,j,k+2)-u(2,i,j,k)) )*istrxy;
// rr derivative (w)
// 43 ops, tot=1037
cof1=(mu(i,j,k-2)+la(i,j,k-2))*met(3,i,j,k-2)*met(4,i,j,k-2);
cof2=(mu(i,j,k-1)+la(i,j,k-1))*met(3,i,j,k-1)*met(4,i,j,k-1);
cof3=(mu(i,j,k) +la(i,j,k) )*met(3,i,j,k)* met(4,i,j,k);
cof4=(mu(i,j,k+1)+la(i,j,k+1))*met(3,i,j,k+1)*met(4,i,j,k+1);
cof5=(mu(i,j,k+2)+la(i,j,k+2))*met(3,i,j,k+2)*met(4,i,j,k+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r2 += i6* (
mux1*(u(3,i,j,k-2)-u(3,i,j,k)) +
mux2*(u(3,i,j,k-1)-u(3,i,j,k)) +
mux3*(u(3,i,j,k+1)-u(3,i,j,k)) +
mux4*(u(3,i,j,k+2)-u(3,i,j,k)) )*istrx;
// pq-derivatives
// 38 ops, tot=1075
r2 +=
c2*( la(i,j+2,k)*met(1,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(1,i+2,j+2,k)-u(1,i-2,j+2,k)) +
c1*(u(1,i+1,j+2,k)-u(1,i-1,j+2,k)) )
- la(i,j-2,k)*met(1,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(1,i+2,j-2,k)-u(1,i-2,j-2,k))+
c1*(u(1,i+1,j-2,k)-u(1,i-1,j-2,k)) )
) +
c1*( la(i,j+1,k)*met(1,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(1,i+2,j+1,k)-u(1,i-2,j+1,k)) +
c1*(u(1,i+1,j+1,k)-u(1,i-1,j+1,k)) )
- la(i,j-1,k)*met(1,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(1,i+2,j-1,k)-u(1,i-2,j-1,k)) +
c1*(u(1,i+1,j-1,k)-u(1,i-1,j-1,k))));
// qp-derivatives
// 38 ops, tot=1113
r2 +=
c2*( mu(i+2,j,k)*met(1,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(1,i+2,j+2,k)-u(1,i+2,j-2,k)) +
c1*(u(1,i+2,j+1,k)-u(1,i+2,j-1,k)) )
- mu(i-2,j,k)*met(1,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(1,i-2,j+2,k)-u(1,i-2,j-2,k))+
c1*(u(1,i-2,j+1,k)-u(1,i-2,j-1,k)) )
) +
c1*( mu(i+1,j,k)*met(1,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(1,i+1,j+2,k)-u(1,i+1,j-2,k)) +
c1*(u(1,i+1,j+1,k)-u(1,i+1,j-1,k)) )
- mu(i-1,j,k)*met(1,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(1,i-1,j+2,k)-u(1,i-1,j-2,k)) +
c1*(u(1,i-1,j+1,k)-u(1,i-1,j-1,k))));
// pr-derivatives
// 82 ops, tot=1195
r2 += c2*(
(la(i,j,k+2))*met(3,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(1,i+2,j,k+2)-u(1,i-2,j,k+2)) +
c1*(u(1,i+1,j,k+2)-u(1,i-1,j,k+2)) )
+ mu(i,j,k+2)*met(2,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(2,i+2,j,k+2)-u(2,i-2,j,k+2)) +
c1*(u(2,i+1,j,k+2)-u(2,i-1,j,k+2)) )*strx(i)*istry
- ((la(i,j,k-2))*met(3,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(1,i+2,j,k-2)-u(1,i-2,j,k-2)) +
c1*(u(1,i+1,j,k-2)-u(1,i-1,j,k-2)) )
+ mu(i,j,k-2)*met(2,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(2,i+2,j,k-2)-u(2,i-2,j,k-2)) +
c1*(u(2,i+1,j,k-2)-u(2,i-1,j,k-2)) )*strx(i)*istry )
) + c1*(
(la(i,j,k+1))*met(3,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(1,i+2,j,k+1)-u(1,i-2,j,k+1)) +
c1*(u(1,i+1,j,k+1)-u(1,i-1,j,k+1)) )
+ mu(i,j,k+1)*met(2,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(2,i+2,j,k+1)-u(2,i-2,j,k+1)) +
c1*(u(2,i+1,j,k+1)-u(2,i-1,j,k+1)) )*strx(i)*istry
- (la(i,j,k-1)*met(3,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(1,i+2,j,k-1)-u(1,i-2,j,k-1)) +
c1*(u(1,i+1,j,k-1)-u(1,i-1,j,k-1)) )
+ mu(i,j,k-1)*met(2,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(2,i+2,j,k-1)-u(2,i-2,j,k-1)) +
c1*(u(2,i+1,j,k-1)-u(2,i-1,j,k-1)) )*strx(i)*istry ) );
// rp derivatives
// 82 ops, tot=1277
r2 += c2*(
(mu(i+2,j,k))*met(3,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(1,i+2,j,k+2)-u(1,i+2,j,k-2)) +
c1*(u(1,i+2,j,k+1)-u(1,i+2,j,k-1)) )
+ mu(i+2,j,k)*met(2,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(2,i+2,j,k+2)-u(2,i+2,j,k-2)) +
c1*(u(2,i+2,j,k+1)-u(2,i+2,j,k-1)) )*strx(i+2)*istry
- (mu(i-2,j,k)*met(3,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(1,i-2,j,k+2)-u(1,i-2,j,k-2)) +
c1*(u(1,i-2,j,k+1)-u(1,i-2,j,k-1)) )
+ mu(i-2,j,k)*met(2,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(2,i-2,j,k+2)-u(2,i-2,j,k-2)) +
c1*(u(2,i-2,j,k+1)-u(2,i-2,j,k-1)) )*strx(i-2)*istry )
) + c1*(
(mu(i+1,j,k))*met(3,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(1,i+1,j,k+2)-u(1,i+1,j,k-2)) +
c1*(u(1,i+1,j,k+1)-u(1,i+1,j,k-1)) )
+ mu(i+1,j,k)*met(2,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(2,i+1,j,k+2)-u(2,i+1,j,k-2)) +
c1*(u(2,i+1,j,k+1)-u(2,i+1,j,k-1)) )*strx(i+1)*istry
- (mu(i-1,j,k)*met(3,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(1,i-1,j,k+2)-u(1,i-1,j,k-2)) +
c1*(u(1,i-1,j,k+1)-u(1,i-1,j,k-1)) )
+ mu(i-1,j,k)*met(2,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(2,i-1,j,k+2)-u(2,i-1,j,k-2)) +
c1*(u(2,i-1,j,k+1)-u(2,i-1,j,k-1)) )*strx(i-1)*istry ) );
// qr derivatives
// 130 ops, tot=1407
r2 += c2*(
mu(i,j,k+2)*met(2,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(1,i,j+2,k+2)-u(1,i,j-2,k+2)) +
c1*(u(1,i,j+1,k+2)-u(1,i,j-1,k+2)) )
+ (2*mu(i,j,k+2)+la(i,j,k+2))*met(3,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(2,i,j+2,k+2)-u(2,i,j-2,k+2)) +
c1*(u(2,i,j+1,k+2)-u(2,i,j-1,k+2)) )*stry(j)*istrx
+mu(i,j,k+2)*met(4,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(3,i,j+2,k+2)-u(3,i,j-2,k+2)) +
c1*(u(3,i,j+1,k+2)-u(3,i,j-1,k+2)) )*istrx
- ( mu(i,j,k-2)*met(2,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(1,i,j+2,k-2)-u(1,i,j-2,k-2)) +
c1*(u(1,i,j+1,k-2)-u(1,i,j-1,k-2)) )
+(2*mu(i,j,k-2)+ la(i,j,k-2))*met(3,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(2,i,j+2,k-2)-u(2,i,j-2,k-2)) +
c1*(u(2,i,j+1,k-2)-u(2,i,j-1,k-2)) )*stry(j)*istrx +
mu(i,j,k-2)*met(4,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(3,i,j+2,k-2)-u(3,i,j-2,k-2)) +
c1*(u(3,i,j+1,k-2)-u(3,i,j-1,k-2)) )*istrx )
) + c1*(
mu(i,j,k+1)*met(2,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(1,i,j+2,k+1)-u(1,i,j-2,k+1)) +
c1*(u(1,i,j+1,k+1)-u(1,i,j-1,k+1)) )
+ (2*mu(i,j,k+1)+la(i,j,k+1))*met(3,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(2,i,j+2,k+1)-u(2,i,j-2,k+1)) +
c1*(u(2,i,j+1,k+1)-u(2,i,j-1,k+1)) )*stry(j)*istrx
+ mu(i,j,k+1)*met(4,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(3,i,j+2,k+1)-u(3,i,j-2,k+1)) +
c1*(u(3,i,j+1,k+1)-u(3,i,j-1,k+1)) )*istrx
- ( mu(i,j,k-1)*met(2,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(1,i,j+2,k-1)-u(1,i,j-2,k-1)) +
c1*(u(1,i,j+1,k-1)-u(1,i,j-1,k-1)) )
+ (2*mu(i,j,k-1)+la(i,j,k-1))*met(3,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(2,i,j+2,k-1)-u(2,i,j-2,k-1)) +
c1*(u(2,i,j+1,k-1)-u(2,i,j-1,k-1)) )*stry(j)*istrx
+ mu(i,j,k-1)*met(4,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(3,i,j+2,k-1)-u(3,i,j-2,k-1)) +
c1*(u(3,i,j+1,k-1)-u(3,i,j-1,k-1)) )*istrx ) );
// rq derivatives
// 130 ops, tot=1537
r2 += c2*(
la(i,j+2,k)*met(2,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(1,i,j+2,k+2)-u(1,i,j+2,k-2)) +
c1*(u(1,i,j+2,k+1)-u(1,i,j+2,k-1)) )
+(2*mu(i,j+2,k)+la(i,j+2,k))*met(3,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(2,i,j+2,k+2)-u(2,i,j+2,k-2)) +
c1*(u(2,i,j+2,k+1)-u(2,i,j+2,k-1)) )*stry(j+2)*istrx
+ la(i,j+2,k)*met(4,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(3,i,j+2,k+2)-u(3,i,j+2,k-2)) +
c1*(u(3,i,j+2,k+1)-u(3,i,j+2,k-1)) )*istrx
- ( la(i,j-2,k)*met(2,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(1,i,j-2,k+2)-u(1,i,j-2,k-2)) +
c1*(u(1,i,j-2,k+1)-u(1,i,j-2,k-1)) )
+(2*mu(i,j-2,k)+la(i,j-2,k))*met(3,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(2,i,j-2,k+2)-u(2,i,j-2,k-2)) +
c1*(u(2,i,j-2,k+1)-u(2,i,j-2,k-1)) )*stry(j-2)*istrx
+ la(i,j-2,k)*met(4,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(3,i,j-2,k+2)-u(3,i,j-2,k-2)) +
c1*(u(3,i,j-2,k+1)-u(3,i,j-2,k-1)) )*istrx )
) + c1*(
la(i,j+1,k)*met(2,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(1,i,j+1,k+2)-u(1,i,j+1,k-2)) +
c1*(u(1,i,j+1,k+1)-u(1,i,j+1,k-1)) )
+ (2*mu(i,j+1,k)+la(i,j+1,k))*met(3,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(2,i,j+1,k+2)-u(2,i,j+1,k-2)) +
c1*(u(2,i,j+1,k+1)-u(2,i,j+1,k-1)) )*stry(j+1)*istrx
+la(i,j+1,k)*met(4,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(3,i,j+1,k+2)-u(3,i,j+1,k-2)) +
c1*(u(3,i,j+1,k+1)-u(3,i,j+1,k-1)) )*istrx
- ( la(i,j-1,k)*met(2,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(1,i,j-1,k+2)-u(1,i,j-1,k-2)) +
c1*(u(1,i,j-1,k+1)-u(1,i,j-1,k-1)) )
+ (2*mu(i,j-1,k)+la(i,j-1,k))*met(3,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(2,i,j-1,k+2)-u(2,i,j-1,k-2)) +
c1*(u(2,i,j-1,k+1)-u(2,i,j-1,k-1)) )*stry(j-1)*istrx
+ la(i,j-1,k)*met(4,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(3,i,j-1,k+2)-u(3,i,j-1,k-2)) +
c1*(u(3,i,j-1,k+1)-u(3,i,j-1,k-1)) )*istrx ) );
// 4 ops, tot=1541
lu(2,i,j,k) = a1*lu(2,i,j,k) + r2*ijac;
// w-equation
// r1 = 0;
// pp derivative (w)
// 43 ops, tot=1580
cof1=(mu(i-2,j,k))*met(1,i-2,j,k)*met(1,i-2,j,k)*strx(i-2);
cof2=(mu(i-1,j,k))*met(1,i-1,j,k)*met(1,i-1,j,k)*strx(i-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*strx(i);
cof4=(mu(i+1,j,k))*met(1,i+1,j,k)*met(1,i+1,j,k)*strx(i+1);
cof5=(mu(i+2,j,k))*met(1,i+2,j,k)*met(1,i+2,j,k)*strx(i+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 += i6* (
mux1*(u(3,i-2,j,k)-u(3,i,j,k)) +
mux2*(u(3,i-1,j,k)-u(3,i,j,k)) +
mux3*(u(3,i+1,j,k)-u(3,i,j,k)) +
mux4*(u(3,i+2,j,k)-u(3,i,j,k)) )*istry;
// qq derivative (w)
// 43 ops, tot=1623
cof1=(mu(i,j-2,k))*met(1,i,j-2,k)*met(1,i,j-2,k)*stry(j-2);
cof2=(mu(i,j-1,k))*met(1,i,j-1,k)*met(1,i,j-1,k)*stry(j-1);
cof3=(mu(i,j,k))*met(1,i,j,k)*met(1,i,j,k)*stry(j);
cof4=(mu(i,j+1,k))*met(1,i,j+1,k)*met(1,i,j+1,k)*stry(j+1);
cof5=(mu(i,j+2,k))*met(1,i,j+2,k)*met(1,i,j+2,k)*stry(j+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 += i6* (
mux1*(u(3,i,j-2,k)-u(3,i,j,k)) +
mux2*(u(3,i,j-1,k)-u(3,i,j,k)) +
mux3*(u(3,i,j+1,k)-u(3,i,j,k)) +
mux4*(u(3,i,j+2,k)-u(3,i,j,k)) )*istrx;
// rr derivative (u)
// 43 ops, tot=1666
cof1=(mu(i,j,k-2)+la(i,j,k-2))*met(2,i,j,k-2)*met(4,i,j,k-2);
cof2=(mu(i,j,k-1)+la(i,j,k-1))*met(2,i,j,k-1)*met(4,i,j,k-1);
cof3=(mu(i,j,k)+la(i,j,k))*met(2,i,j,k)*met(4,i,j,k);
cof4=(mu(i,j,k+1)+la(i,j,k+1))*met(2,i,j,k+1)*met(4,i,j,k+1);
cof5=(mu(i,j,k+2)+la(i,j,k+2))*met(2,i,j,k+2)*met(4,i,j,k+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 += i6* (
mux1*(u(1,i,j,k-2)-u(1,i,j,k)) +
mux2*(u(1,i,j,k-1)-u(1,i,j,k)) +
mux3*(u(1,i,j,k+1)-u(1,i,j,k)) +
mux4*(u(1,i,j,k+2)-u(1,i,j,k)) )*istry;
// rr derivative (v)
// 43 ops, tot=1709
cof1=(mu(i,j,k-2)+la(i,j,k-2))*met(3,i,j,k-2)*met(4,i,j,k-2);
cof2=(mu(i,j,k-1)+la(i,j,k-1))*met(3,i,j,k-1)*met(4,i,j,k-1);
cof3=(mu(i,j,k)+la(i,j,k))*met(3,i,j,k)*met(4,i,j,k);
cof4=(mu(i,j,k+1)+la(i,j,k+1))*met(3,i,j,k+1)*met(4,i,j,k+1);
cof5=(mu(i,j,k+2)+la(i,j,k+2))*met(3,i,j,k+2)*met(4,i,j,k+2);
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 += i6* (
mux1*(u(2,i,j,k-2)-u(2,i,j,k)) +
mux2*(u(2,i,j,k-1)-u(2,i,j,k)) +
mux3*(u(2,i,j,k+1)-u(2,i,j,k)) +
mux4*(u(2,i,j,k+2)-u(2,i,j,k)) )*istrx;
// rr derivative (w)
// 83 ops, tot=1792
cof1 = (2*mu(i,j,k-2)+la(i,j,k-2))*met(4,i,j,k-2)*met(4,i,j,k-2) +
mu(i,j,k-2)*(met(2,i,j,k-2)*strx(i)*met(2,i,j,k-2)*strx(i)+
met(3,i,j,k-2)*stry(j)*met(3,i,j,k-2)*stry(j) );
cof2 = (2*mu(i,j,k-1)+la(i,j,k-1))*met(4,i,j,k-1)*met(4,i,j,k-1) +
mu(i,j,k-1)*(met(2,i,j,k-1)*strx(i)*met(2,i,j,k-1)*strx(i)+
met(3,i,j,k-1)*stry(j)*met(3,i,j,k-1)*stry(j) );
cof3 = (2*mu(i,j,k)+la(i,j,k))*met(4,i,j,k)*met(4,i,j,k) +
mu(i,j,k)*(met(2,i,j,k)*strx(i)*met(2,i,j,k)*strx(i)+
met(3,i,j,k)*stry(j)*met(3,i,j,k)*stry(j) );
cof4 = (2*mu(i,j,k+1)+la(i,j,k+1))*met(4,i,j,k+1)*met(4,i,j,k+1) +
mu(i,j,k+1)*(met(2,i,j,k+1)*strx(i)*met(2,i,j,k+1)*strx(i)+
met(3,i,j,k+1)*stry(j)*met(3,i,j,k+1)*stry(j));
cof5 = (2*mu(i,j,k+2)+la(i,j,k+2))*met(4,i,j,k+2)*met(4,i,j,k+2) +
mu(i,j,k+2)*( met(2,i,j,k+2)*strx(i)*met(2,i,j,k+2)*strx(i)+
met(3,i,j,k+2)*stry(j)*met(3,i,j,k+2)*stry(j) );
mux1 = cof2 -tf*(cof3+cof1);
mux2 = cof1 + cof4+3*(cof3+cof2);
mux3 = cof2 + cof5+3*(cof4+cof3);
mux4 = cof4-tf*(cof3+cof5);
r3 += i6* (
mux1*(u(3,i,j,k-2)-u(3,i,j,k)) +
mux2*(u(3,i,j,k-1)-u(3,i,j,k)) +
mux3*(u(3,i,j,k+1)-u(3,i,j,k)) +
mux4*(u(3,i,j,k+2)-u(3,i,j,k)) )*istrxy
// pr-derivatives
// 86 ops, tot=1878
// r1 +=
+ c2*(
(la(i,j,k+2))*met(4,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(1,i+2,j,k+2)-u(1,i-2,j,k+2)) +
c1*(u(1,i+1,j,k+2)-u(1,i-1,j,k+2)) )*istry
+ mu(i,j,k+2)*met(2,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(3,i+2,j,k+2)-u(3,i-2,j,k+2)) +
c1*(u(3,i+1,j,k+2)-u(3,i-1,j,k+2)) )*strx(i)*istry
- ((la(i,j,k-2))*met(4,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(1,i+2,j,k-2)-u(1,i-2,j,k-2)) +
c1*(u(1,i+1,j,k-2)-u(1,i-1,j,k-2)) )*istry
+ mu(i,j,k-2)*met(2,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(3,i+2,j,k-2)-u(3,i-2,j,k-2)) +
c1*(u(3,i+1,j,k-2)-u(3,i-1,j,k-2)) )*strx(i)*istry )
) + c1*(
(la(i,j,k+1))*met(4,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(1,i+2,j,k+1)-u(1,i-2,j,k+1)) +
c1*(u(1,i+1,j,k+1)-u(1,i-1,j,k+1)) )*istry
+ mu(i,j,k+1)*met(2,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(3,i+2,j,k+1)-u(3,i-2,j,k+1)) +
c1*(u(3,i+1,j,k+1)-u(3,i-1,j,k+1)) )*strx(i)*istry
- (la(i,j,k-1)*met(4,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(1,i+2,j,k-1)-u(1,i-2,j,k-1)) +
c1*(u(1,i+1,j,k-1)-u(1,i-1,j,k-1)) )*istry
+ mu(i,j,k-1)*met(2,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(3,i+2,j,k-1)-u(3,i-2,j,k-1)) +
c1*(u(3,i+1,j,k-1)-u(3,i-1,j,k-1)) )*strx(i)*istry ) )
// rp derivatives
// 79 ops, tot=1957
// r1 +=
+ istry*(c2*(
(mu(i+2,j,k))*met(4,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(1,i+2,j,k+2)-u(1,i+2,j,k-2)) +
c1*(u(1,i+2,j,k+1)-u(1,i+2,j,k-1)) )
+ mu(i+2,j,k)*met(2,i+2,j,k)*met(1,i+2,j,k)*(
c2*(u(3,i+2,j,k+2)-u(3,i+2,j,k-2)) +
c1*(u(3,i+2,j,k+1)-u(3,i+2,j,k-1)) )*strx(i+2)
- (mu(i-2,j,k)*met(4,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(1,i-2,j,k+2)-u(1,i-2,j,k-2)) +
c1*(u(1,i-2,j,k+1)-u(1,i-2,j,k-1)) )
+ mu(i-2,j,k)*met(2,i-2,j,k)*met(1,i-2,j,k)*(
c2*(u(3,i-2,j,k+2)-u(3,i-2,j,k-2)) +
c1*(u(3,i-2,j,k+1)-u(3,i-2,j,k-1)) )*strx(i-2) )
) + c1*(
(mu(i+1,j,k))*met(4,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(1,i+1,j,k+2)-u(1,i+1,j,k-2)) +
c1*(u(1,i+1,j,k+1)-u(1,i+1,j,k-1)) )
+ mu(i+1,j,k)*met(2,i+1,j,k)*met(1,i+1,j,k)*(
c2*(u(3,i+1,j,k+2)-u(3,i+1,j,k-2)) +
c1*(u(3,i+1,j,k+1)-u(3,i+1,j,k-1)) )*strx(i+1)
- (mu(i-1,j,k)*met(4,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(1,i-1,j,k+2)-u(1,i-1,j,k-2)) +
c1*(u(1,i-1,j,k+1)-u(1,i-1,j,k-1)) )
+ mu(i-1,j,k)*met(2,i-1,j,k)*met(1,i-1,j,k)*(
c2*(u(3,i-1,j,k+2)-u(3,i-1,j,k-2)) +
c1*(u(3,i-1,j,k+1)-u(3,i-1,j,k-1)) )*strx(i-1) ) ) )
// qr derivatives
// 86 ops, tot=2043
// r1 +=
+ c2*(
mu(i,j,k+2)*met(3,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(3,i,j+2,k+2)-u(3,i,j-2,k+2)) +
c1*(u(3,i,j+1,k+2)-u(3,i,j-1,k+2)) )*stry(j)*istrx
+ la(i,j,k+2)*met(4,i,j,k+2)*met(1,i,j,k+2)*(
c2*(u(2,i,j+2,k+2)-u(2,i,j-2,k+2)) +
c1*(u(2,i,j+1,k+2)-u(2,i,j-1,k+2)) )*istrx
- ( mu(i,j,k-2)*met(3,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(3,i,j+2,k-2)-u(3,i,j-2,k-2)) +
c1*(u(3,i,j+1,k-2)-u(3,i,j-1,k-2)) )*stry(j)*istrx
+ la(i,j,k-2)*met(4,i,j,k-2)*met(1,i,j,k-2)*(
c2*(u(2,i,j+2,k-2)-u(2,i,j-2,k-2)) +
c1*(u(2,i,j+1,k-2)-u(2,i,j-1,k-2)) )*istrx )
) + c1*(
mu(i,j,k+1)*met(3,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(3,i,j+2,k+1)-u(3,i,j-2,k+1)) +
c1*(u(3,i,j+1,k+1)-u(3,i,j-1,k+1)) )*stry(j)*istrx
+ la(i,j,k+1)*met(4,i,j,k+1)*met(1,i,j,k+1)*(
c2*(u(2,i,j+2,k+1)-u(2,i,j-2,k+1)) +
c1*(u(2,i,j+1,k+1)-u(2,i,j-1,k+1)) )*istrx
- ( mu(i,j,k-1)*met(3,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(3,i,j+2,k-1)-u(3,i,j-2,k-1)) +
c1*(u(3,i,j+1,k-1)-u(3,i,j-1,k-1)) )*stry(j)*istrx
+ la(i,j,k-1)*met(4,i,j,k-1)*met(1,i,j,k-1)*(
c2*(u(2,i,j+2,k-1)-u(2,i,j-2,k-1)) +
c1*(u(2,i,j+1,k-1)-u(2,i,j-1,k-1)) )*istrx ) )
// rq derivatives
// 79 ops, tot=2122
// r1 +=
+ istrx*(c2*(
mu(i,j+2,k)*met(3,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(3,i,j+2,k+2)-u(3,i,j+2,k-2)) +
c1*(u(3,i,j+2,k+1)-u(3,i,j+2,k-1)) )*stry(j+2)
+ mu(i,j+2,k)*met(4,i,j+2,k)*met(1,i,j+2,k)*(
c2*(u(2,i,j+2,k+2)-u(2,i,j+2,k-2)) +
c1*(u(2,i,j+2,k+1)-u(2,i,j+2,k-1)) )
- ( mu(i,j-2,k)*met(3,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(3,i,j-2,k+2)-u(3,i,j-2,k-2)) +
c1*(u(3,i,j-2,k+1)-u(3,i,j-2,k-1)) )*stry(j-2)
+ mu(i,j-2,k)*met(4,i,j-2,k)*met(1,i,j-2,k)*(
c2*(u(2,i,j-2,k+2)-u(2,i,j-2,k-2)) +
c1*(u(2,i,j-2,k+1)-u(2,i,j-2,k-1)) ) )
) + c1*(
mu(i,j+1,k)*met(3,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(3,i,j+1,k+2)-u(3,i,j+1,k-2)) +
c1*(u(3,i,j+1,k+1)-u(3,i,j+1,k-1)) )*stry(j+1)
+ mu(i,j+1,k)*met(4,i,j+1,k)*met(1,i,j+1,k)*(
c2*(u(2,i,j+1,k+2)-u(2,i,j+1,k-2)) +
c1*(u(2,i,j+1,k+1)-u(2,i,j+1,k-1)) )
- ( mu(i,j-1,k)*met(3,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(3,i,j-1,k+2)-u(3,i,j-1,k-2)) +
c1*(u(3,i,j-1,k+1)-u(3,i,j-1,k-1)) )*stry(j-1)
+ mu(i,j-1,k)*met(4,i,j-1,k)*met(1,i,j-1,k)*(
c2*(u(2,i,j-1,k+2)-u(2,i,j-1,k-2)) +
c1*(u(2,i,j-1,k+1)-u(2,i,j-1,k-1)) ) ) ) );
// 4 ops, tot=2126
lu(3,i,j,k) = a1*lu(3,i,j,k) + r3*ijac;
}
}
#undef mu
#undef la
#undef jac
#undef u
#undef lu
#undef met
#undef strx
#undef stry
#undef acof
#undef bope
#undef ghcof
}
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% John Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    target[3],
    zero;

  RectangleInfo
    bounds;

  register const PixelPacket
    *p;

  ssize_t
    y;

  /*
    Find the bounding box by comparing every pixel against three corner
    samples: target[0]=top-left, target[1]=top-right, target[2]=bottom-left.
    A pixel that differs from the relevant corner color extends the box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds.width=0;
  bounds.height=0;
  bounds.x=(ssize_t) image->columns;
  bounds.y=(ssize_t) image->rows;
  GetMagickPixelPacket(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const PixelPacket *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
    &target[0]);
  GetMagickPixelPacket(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  if (p == (const PixelPacket *) NULL)
    {
      /*
        Bug fix: this result (and the bottom-left fetch below) was
        previously passed to SetMagickPixelPacket without a NULL check;
        only the first corner fetch was checked.
      */
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
    &target[1]);
  GetMagickPixelPacket(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  if (p == (const PixelPacket *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
    &target[2]);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    RectangleInfo
      bounding_box;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Snapshot the shared bounds under the same named critical section
      that guards the merge below.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((x < bounding_box.x) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p++;
    }
    /*
      Merge this row's private box into the shared result.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /*
        width/height were tracked as maximum x/y coordinates; convert them
        to extents relative to the box origin.
      */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  size_t
    depth;

  /* Convenience wrapper: depth across the composite of all channels. */
  depth=GetImageChannelDepth(image,CompositeChannels,exception);
  return(depth);
}
MagickExport size_t GetImageChannelDepth(const Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    id;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth: the smallest number of bits at which every sample
    of the requested channels survives a scale-down/scale-up round trip
    unchanged.  One candidate depth is tracked per OpenMP thread; the
    per-thread maxima are merged at the end.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (id=0; id < (ssize_t) number_threads; id++)
    current_depth[id]=1;
  if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
    {
      register ssize_t
        i;

      /*
        PseudoClass image without alpha: only the colormap entries matter.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickStatusType
            lossy;

          QuantumAny
            range;

          /*
            Bug fix: accumulate per-channel losses with |=.  The previous
            code applied &= to a zero-initialized flag, so the flag stayed
            0, the loop broke immediately, and a depth of 1 was always
            reported for colormapped images.
          */
          lossy=0;
          range=GetQuantumRange(current_depth[id]);
          if ((channel & RedChannel) != 0)
            lossy|=image->colormap[i].red != ScaleAnyToQuantum(
              ScaleQuantumToAny(image->colormap[i].red,range),range);
          if ((channel & GreenChannel) != 0)
            lossy|=image->colormap[i].green != ScaleAnyToQuantum(
              ScaleQuantumToAny(image->colormap[i].green,range),range);
          if ((channel & BlueChannel) != 0)
            lossy|=image->colormap[i].blue != ScaleAnyToQuantum(
              ScaleQuantumToAny(image->colormap[i].blue,range),range);
          if (lossy == 0)
            break;
          current_depth[id]++;
        }
      }
      depth=current_depth[0];
      for (id=1; id < (ssize_t) number_threads; id++)
        if (depth < current_depth[id])
          depth=current_depth[id];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if (QuantumRange <= MaxMap)
    {
      register ssize_t
        i;

      size_t
        *depth_map;

      /*
        Scale pixels to desired depth (optimized with a depth map giving,
        for each possible sample value, the smallest depth at which it
        round-trips exactly).
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        unsigned int
          bits;

        for (bits=1; bits < MAGICKCORE_QUANTUM_DEPTH; bits++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range=GetQuantumRange(bits);
          pixel=(Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=bits;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register const IndexPacket
          *restrict indexes;

        register const PixelPacket
          *restrict p;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          Quantum
            pixel;

          if ((channel & RedChannel) != 0)
            {
              pixel=GetPixelRed(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if ((channel & GreenChannel) != 0)
            {
              pixel=GetPixelGreen(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if ((channel & BlueChannel) != 0)
            {
              pixel=GetPixelBlue(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            {
              pixel=GetPixelOpacity(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              pixel=GetPixelIndex(indexes+x);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          p++;
        }
        /* Maximum possible depth reached: later rows cannot increase it. */
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (id=1; id < (ssize_t) number_threads; id++)
        if (depth < current_depth[id])
          depth=current_depth[id];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
      {
        MagickStatusType
          lossy;

        QuantumAny
          range;

        /*
          Bug fix: accumulate with |= (the previous &= on a zero flag
          terminated the search at depth 1; see the colormap branch).
        */
        lossy=0;
        range=GetQuantumRange(current_depth[id]);
        if ((channel & RedChannel) != 0)
          lossy|=GetPixelRed(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelRed(p),range),range);
        if ((channel & GreenChannel) != 0)
          lossy|=GetPixelGreen(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelGreen(p),range),range);
        if ((channel & BlueChannel) != 0)
          lossy|=GetPixelBlue(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelBlue(p),range),range);
        if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
          lossy|=GetPixelOpacity(p) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelOpacity(p),range),range);
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          lossy|=GetPixelIndex(indexes+x) != ScaleAnyToQuantum(
            ScaleQuantumToAny(GetPixelIndex(indexes+x),range),range);
        if (lossy == 0)
          break;
        current_depth[id]++;
      }
      p++;
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (id=1; id < (ssize_t) number_threads; id++)
    if (depth < current_depth[id])
      depth=current_depth[id];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, or 32.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
static inline double MagickMin(const double x,const double y)
{
  /* Return the smaller of two doubles. */
  return(x < y ? x : y);
}
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  static const size_t
    legal_depths[] = { 8, 16, 32, 64 };

  register size_t
    i;

  size_t
    depth;

  /*
    Round the image depth up to the next legal quantum depth (8, 16, 32,
    or 64); depths above 64 are returned unchanged.  When constrain is
    set, clamp the result to MAGICKCORE_QUANTUM_DEPTH.
  */
  depth=image->depth;
  for (i=0; i < sizeof(legal_depths)/sizeof(legal_depths[0]); i++)
    if (depth <= legal_depths[i])
      {
        depth=legal_depths[i];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
  /*
    Classify the image by inspecting its colorspace, alpha channel, and
    pixel contents, from most to least constrained type.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    return(image->matte == MagickFalse ? ColorSeparationType :
      ColorSeparationMatteType);
  if (IsMonochromeImage(image,exception) != MagickFalse)
    return(BilevelType);
  if (IsGrayImage(image,exception) != MagickFalse)
    return(image->matte == MagickFalse ? GrayscaleType : GrayscaleMatteType);
  if (IsPaletteImage(image,exception) != MagickFalse)
    return(image->matte == MagickFalse ? PaletteType : PaletteMatteType);
  return(image->matte == MagickFalse ? TrueColorType : TrueColorMatteType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *gray_view;

  ImageType
    scan_type;

  register const PixelPacket
    *q;

  register ssize_t
    col;

  ssize_t
    row;

  /*
    Scan every pixel; if all have equal red, green, and blue intensities the
    image is gray (bilevel when each is also pure black or white).  On
    success, the colorspace and type are updated in place as a side effect.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  scan_type=BilevelType;
  gray_view=AcquireVirtualCacheView(image,exception);
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    q=GetCacheViewVirtualPixels(gray_view,0,row,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      break;
    for (col=0; col < (ssize_t) image->columns; col++, q++)
    {
      if (IsGrayPixel(q) == MagickFalse)
        {
          scan_type=UndefinedType;
          break;
        }
      if ((scan_type == BilevelType) && (IsMonochromePixel(q) == MagickFalse))
        scan_type=GrayscaleType;
    }
    if (scan_type == UndefinedType)
      break;
  }
  gray_view=DestroyCacheView(gray_view);
  if (scan_type == UndefinedType)
    return(MagickFalse);
  ((Image *) image)->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  ((Image *) image)->type=scan_type;
  if ((scan_type == GrayscaleType) && (image->matte != MagickFalse))
    ((Image *) image)->type=GrayscaleMatteType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register ssize_t
    x;

  register const PixelPacket
    *p;

  ssize_t
    y;

  /*
    Returns MagickTrue if every pixel is monochrome (same red, green, and
    blue intensity, each 0 or QuantumRange).  On success the image
    colorspace is set to GRAYColorspace and the type to BilevelType as a
    side effect.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsMonochromePixel(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  ((Image *) image)->colorspace=GRAYColorspace;
  /*
    Bug fix: the cache was previously synchronized twice and, when the
    second call failed, the (successful) result of the first call was
    returned.  Synchronize once and report failure as MagickFalse.
  */
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  ((Image *) image)->type=type;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *opaque_view;

  MagickBooleanType
    opaque;

  register const PixelPacket
    *q;

  register ssize_t
    col;

  ssize_t
    row;

  /*
    Determine if the image is fully opaque: every pixel's opacity equals
    OpaqueOpacity.  A pixel-cache read failure also reports MagickFalse.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->matte == MagickFalse)
    return(MagickTrue);
  opaque=MagickTrue;
  opaque_view=AcquireVirtualCacheView(image,exception);
  for (row=0; (row < (ssize_t) image->rows) && (opaque != MagickFalse); row++)
  {
    q=GetCacheViewVirtualPixels(opaque_view,0,row,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      {
        opaque=MagickFalse;
        break;
      }
    for (col=0; col < (ssize_t) image->columns; col++, q++)
      if (GetPixelOpacity(q) != OpaqueOpacity)
        {
          opaque=MagickFalse;
          break;
        }
  }
  opaque_view=DestroyCacheView(opaque_view);
  return(opaque);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelDepth() sets the depth of the image.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: apply the requested depth to every channel. */
  status=SetImageChannelDepth(image,CompositeChannels,depth);
  return(status);
}
/*
  SetImageChannelDepth() reduces the effective depth of the selected channels
  by round-tripping every quantum value through the reduced range
  (ScaleQuantumToAny followed by ScaleAnyToQuantum), i.e. it quantizes pixel
  values to what `depth` bits can represent while keeping the storage depth
  unchanged.  Returns MagickTrue on success.

  Parameters:
    image   - the image to modify in place.
    channel - bitmask of channels (Red/Green/Blue/Opacity) to quantize.
    depth   - requested depth in bits.
*/
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
  const ChannelType channel,const size_t depth)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /*
    A depth at or above the build-time quantum depth cannot lose precision:
    just record it and return without touching any pixels.
  */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  /* Maximum value representable in `depth` bits. */
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Palette image: quantize the colormap entries.  Pixel data (indexes)
        is untouched here; the pixel pass below still runs so the cached
        pixel values agree with the remapped colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
            image->colormap[i].red,range),range);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
            image->colormap[i].green,range),range);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
            image->colormap[i].blue,range),range);
        if ((channel & OpacityChannel) != 0)
          image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
            image->colormap[i].opacity,range),range);
      }
    }
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if (QuantumRange <= MaxMap)
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
        Integer quantum small enough to enumerate: precompute a lookup table
        of every quantized value so the per-pixel work is one table index.
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          /* Opacity is only quantized when the image actually has a matte. */
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelOpacity(q,depth_map[ScaleQuantumToMap(GetPixelOpacity(q))]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
    Fallback path (HDRI or quantum too wide for a table): quantize each
    sample directly.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelRed(q),
          range),range));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelGreen(q),
          range),range));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelBlue(q),
          range),range));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(
          GetPixelOpacity(q),range),range));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
%      BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType,
%      PaletteBilevelMatteType, PaletteMatteType, TrueColorType,
%      TrueColorMatteType, ColorSeparationType, ColorSeparationMatteType,
%      OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
*/
/*
  SetImageType() converts the image to the requested ImageType by applying the
  necessary colorspace transforms, quantization, storage-class changes, and
  matte adjustments.  On success the image's `type` member is updated and
  MagickTrue is returned; on any failed sub-operation the type is left
  unchanged and MagickFalse is returned.
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  /*
    Honor the image's dither setting (and any "dither" artifact override) for
    the quantization steps below.
  */
  image_info->dither=image->dither;
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* Gray first, then quantize down to a 2-color grayscale palette. */
      if (IsGrayImage(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      if (IsMonochromeImage(image,&image->exception) == MagickFalse)
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=2;
          quantize_info->colorspace=GRAYColorspace;
          status=QuantizeImage(quantize_info,image);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleType:
    {
      if (IsGrayImage(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleMatteType:
    {
      if (IsGrayImage(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      /* Ensure an alpha channel exists (fully opaque if newly added). */
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case PaletteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      /* Quantize only if the image is not already a <=256-color palette. */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->matte=MagickFalse;
      break;
    }
    case PaletteBilevelMatteType:
    {
      /* Palette image whose alpha is thresholded to on/off. */
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      (void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteMatteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      quantize_info=AcquireQuantizeInfo(image_info);
      /* TransparentColorspace makes quantization alpha-aware. */
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case TrueColorMatteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case ColorSeparationType:
    {
      /* CMYK conversion goes via sRGB when needed. */
      if (image->colorspace != CMYKColorspace)
        {
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            status=TransformImageColorspace(image,sRGBColorspace);
          status=TransformImageColorspace(image,CMYKColorspace);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case ColorSeparationMatteType:
    {
      if (image->colorspace != CMYKColorspace)
        {
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            status=TransformImageColorspace(image,sRGBColorspace);
          status=TransformImageColorspace(image,CMYKColorspace);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: define the width and height of the border.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,const CompositeOperator compose,
  ExceptionInfo *exception)
{
  FrameInfo
    geometry;

  Image
    *canvas,
    *framed;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  /*
    A border is simply a frame with no bevels: grow the canvas by the border
    size on every side and offset the image into the middle.
  */
  geometry.width=image->columns+2*border_info->width;
  geometry.height=image->rows+2*border_info->height;
  geometry.x=(ssize_t) border_info->width;
  geometry.y=(ssize_t) border_info->height;
  geometry.inner_bevel=0;
  geometry.outer_bevel=0;
  /*
    FrameImage() paints with the matte color, so clone the image and swap the
    border color into the clone's matte slot; restore the caller's matte color
    on the result afterwards.
  */
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  canvas->matte_color=image->border_color;
  framed=FrameImage(canvas,&geometry,compose,exception);
  canvas=DestroyImage(canvas);
  if (framed != (Image *) NULL)
    framed->matte_color=image->matte_color;
  return(framed);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  FrameImage() adds a simulated three-dimensional border (outer bevel, flat
  matte band, inner bevel) around the image, painted with shades of the
  image's matte color, then composites the original image into the interior.
  Returns the new framed image, or NULL on failure.
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
  const CompositeOperator compose,ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"

  CacheView
    *image_view,
    *frame_view;

  Image
    *frame_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    accentuate,
    highlight,
    matte,
    shadow,
    trough;

  register ssize_t
    x;

  size_t
    bevel_width,
    height,
    width;

  ssize_t
    y;

  /*
    Check frame geometry.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(frame_info != (FrameInfo *) NULL);
  if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
  x=(ssize_t) frame_info->width-frame_info->x-bevel_width;
  y=(ssize_t) frame_info->height-frame_info->y-bevel_width;
  /*
    FIX: use logical || (was bitwise |) for consistency with the bevel check
    above; the conditions are boolean and short-circuit evaluation is intended.
  */
  if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  /*
    Initialize framed image attributes.
  */
  frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
    exception);
  if (frame_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(frame_image,DirectClass,exception) == MagickFalse)
    {
      frame_image=DestroyImage(frame_image);
      return((Image *) NULL);
    }
  if ((IsPixelInfoGray(&frame_image->border_color) == MagickFalse) &&
      (IsGrayColorspace(frame_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(frame_image,sRGBColorspace,exception);
  if ((frame_image->matte_color.alpha_trait != UndefinedPixelTrait) &&
      (frame_image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(frame_image,OpaqueAlpha,exception);
  frame_image->page=image->page;
  if ((image->page.width != 0) && (image->page.height != 0))
    {
      frame_image->page.width+=frame_image->columns-image->columns;
      frame_image->page.height+=frame_image->rows-image->rows;
    }
  /*
    Initialize 3D effects color: lighter (highlight/accentuate) and darker
    (shadow/trough) variants of the matte color for the bevel faces.
  */
  matte=image->matte_color;
  accentuate=matte;
  accentuate.red=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
  accentuate.green=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
  accentuate.blue=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
  accentuate.black=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.black+(QuantumRange*AccentuateModulate)));
  accentuate.alpha=matte.alpha;
  highlight=matte;
  highlight.red=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
  highlight.green=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
  highlight.blue=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
  highlight.black=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.black+(QuantumRange*HighlightModulate)));
  highlight.alpha=matte.alpha;
  shadow=matte;
  shadow.red=QuantumScale*matte.red*ShadowModulate;
  shadow.green=QuantumScale*matte.green*ShadowModulate;
  shadow.blue=QuantumScale*matte.blue*ShadowModulate;
  shadow.black=QuantumScale*matte.black*ShadowModulate;
  shadow.alpha=matte.alpha;
  trough=matte;
  trough.red=QuantumScale*matte.red*TroughModulate;
  trough.green=QuantumScale*matte.green*TroughModulate;
  trough.blue=QuantumScale*matte.blue*TroughModulate;
  trough.black=QuantumScale*matte.black*TroughModulate;
  trough.alpha=matte.alpha;
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  frame_view=AcquireAuthenticCacheView(frame_image,exception);
  height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (height != 0)
    {
      register ssize_t
        x;

      register Quantum
        *magick_restrict q;

      /*
        Draw top of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
        height,exception);
      if (q != (Quantum *) NULL)
        {
          /*
            Outer bevel: highlight wedge on the left, accentuate across the
            top, shadow wedge on the right.
          */
          for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
          {
            for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
            {
              if (x < y)
                SetPixelViaPixelInfo(frame_image,&highlight,q);
              else
                SetPixelViaPixelInfo(frame_image,&accentuate,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /*
            Flat matte band between the outer and inner bevels.
          */
          for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /*
            Inner bevel above the image: shadow wedge, trough across,
            highlight wedge.
          */
          for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            width=image->columns+((size_t) frame_info->inner_bevel << 1)-
              y;
            for (x=0; x < (ssize_t) width; x++)
            {
              if (x < y)
                SetPixelViaPixelInfo(frame_image,&shadow,q);
              else
                SetPixelViaPixelInfo(frame_image,&trough,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  /*
    Draw sides of ornamental border: one scanline per image row, painted
    highlight | matte | shadow | interior | highlight | matte | shadow.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,frame_image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    size_t
      width;

    /*
      Initialize scanline with matte color.
    */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
      frame_image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&highlight,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
    {
      SetPixelViaPixelInfo(frame_image,&matte,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&shadow,q);
      q+=GetPixelChannels(frame_image);
    }
    /*
      Set frame interior pixels; the image itself is composited over these
      at the end.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(frame_image,&frame_image->border_color,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&highlight,q);
      q+=GetPixelChannels(frame_image);
    }
    width=frame_info->width-frame_info->x-image->columns-bevel_width;
    for (x=0; x < (ssize_t) width; x++)
    {
      SetPixelViaPixelInfo(frame_image,&matte,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&shadow,q);
      q+=GetPixelChannels(frame_image);
    }
    if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FrameImage)
#endif
        proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  height=(size_t) (frame_info->inner_bevel+frame_info->height-
    frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
  if (height != 0)
    {
      register ssize_t
        x;

      register Quantum
        *magick_restrict q;

      /*
        Draw bottom of ornamental border (mirror image of the top).
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
        height),frame_image->columns,height,exception);
      if (q != (Quantum *) NULL)
        {
          /*
            Inner bevel below the image.
          */
          for (y=frame_info->inner_bevel-1; y >= 0; y--)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < y; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
                SetPixelViaPixelInfo(frame_image,&highlight,q);
              else
                SetPixelViaPixelInfo(frame_image,&accentuate,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /*
            Flat matte band below the inner bevel.
          */
          height=frame_info->height-frame_info->y-image->rows-bevel_width;
          for (y=0; y < (ssize_t) height; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /*
            Outer bevel at the very bottom.
          */
          for (y=frame_info->outer_bevel-1; y >= 0; y--)
          {
            for (x=0; x < y; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              if (x >= (ssize_t) (frame_image->columns-y))
                SetPixelViaPixelInfo(frame_image,&shadow,q);
              else
                SetPixelViaPixelInfo(frame_image,&trough,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  frame_view=DestroyCacheView(frame_view);
  image_view=DestroyCacheView(image_view);
  /*
    Composite the original image into the frame interior.
  */
  x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
    frame_info->inner_bevel);
  y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (status != MagickFalse)
    status=CompositeImage(frame_image,image,compose,MagickTrue,x,y,
      exception);
  if (status == MagickFalse)
    frame_image=DestroyImage(frame_image);
  return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  RaiseImage() lightens/darkens the image edges to simulate a 3-D button:
  top band and left edge toward `foreground`, bottom band and right edge
  toward `background`.  With raise != 0 the foreground is white (raised
  look); otherwise the roles invert (sunken look).  Returns MagickTrue on
  success.
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise,
  ExceptionInfo *exception)
{
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    foreground,
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  foreground=QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=QuantumRange;
    }
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.  Top band: highlight wedge / accentuate strip / shadow
    wedge.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    register ssize_t
      i,
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < y; x++)
    {
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*AccentuateFactor+
          (double) foreground*(QuantumRange-AccentuateFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Middle band: highlight the left edge, leave the interior untouched,
    shadow the right edge.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,1,1)
#endif
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    register ssize_t
      i,
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q+=GetPixelChannels(image);
    for ( ; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Bottom band: highlight wedge / trough strip / shadow wedge.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,1,1)
#endif
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      i,
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      /*
        FIX: honor the pixel write mask here as well; every other pixel loop
        in this function skips write-masked pixels, but this trough segment
        did not, so masked pixels were being modified along the bottom edge.
      */
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*TroughFactor+
          (double) background*(QuantumRange-TroughFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
Parallel-Impl.h | /*************************************************************************
> File Name: Parallel-Impl.h
> Project Name: CubbyFlow
> This code is based on Jet Framework that was created by Doyub Kim.
> References: https://github.com/doyubkim/fluid-engine-dev
> Purpose: Parallel functions for CubbyFlow.
> Created Time: 2017/02/05
> Copyright (c) 2018, Chan-Ho Chris Ohk
*************************************************************************/
#ifndef CUBBYFLOW_PARALLEL_IMPL_H
#define CUBBYFLOW_PARALLEL_IMPL_H
#include <Core/Utils/Constants.h>
#include <Core/Utils/Parallel.h>
#if defined(CUBBYFLOW_TASKING_HPX)
#include <hpx/include/future.hpp>
#include <hpx/include/parallel_fill.hpp>
#include <hpx/include/parallel_for_each.hpp>
#include <hpx/include/parallel_for_loop.hpp>
#include <hpx/include/parallel_reduce.hpp>
#include <hpx/include/parallel_sort.hpp>
#endif
#if defined(CUBBYFLOW_TASKING_TBB)
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <tbb/parallel_sort.h>
#include <tbb/task.h>
#elif defined(CUBBYFLOW_TASKING_CPP11THREAD)
#include <thread>
#endif
#include <algorithm>
#include <cmath>
#include <future>
#include <vector>
#undef max
#undef min
namespace CubbyFlow
{
namespace Internal
{
#if defined(CUBBYFLOW_TASKING_HPX)
template <typename Task>
using future = hpx::future<Task>;
#else
template <typename Task>
using future = std::future<Task>;
#endif

template <typename TASK>
using operator_return_t = typename std::result_of<TASK()>::type;

//!
//! \brief Runs \p fn asynchronously on the configured tasking backend and
//! returns a future for its result.
//!
//! Backend selection (compile time): HPX -> hpx::async, TBB -> a fire-and-
//! forget tbb::task wrapping a std::packaged_task, C++11 threads ->
//! std::async(std::launch::async), otherwise deferred std::async.
//!
template <typename TASK>
inline auto Async(TASK&& fn) -> future<operator_return_t<TASK>>
{
#if defined(CUBBYFLOW_TASKING_HPX)
	return hpx::async(std::forward<TASK>(fn));
#elif defined(CUBBYFLOW_TASKING_TBB)
	struct LocalTBBTask : public tbb::task
	{
		TASK func;

		LocalTBBTask(TASK&& f) : func(std::forward<TASK>(f))
		{
			// Do nothing
		}

		tbb::task* execute() override
		{
			func();
			return nullptr;
		}
	};

	using package_t = std::packaged_task<operator_return_t<TASK>()>;

	auto task = new package_t(std::forward<TASK>(fn));
	// Fetch the future BEFORE the task is enqueued: once enqueued, a worker
	// thread may execute and delete the packaged task at any moment, so
	// touching *task afterwards would be a use-after-free.
	// (BUGFIX: original code did 'return task.get_future();' after enqueue --
	// 'task' is a raw pointer, so that did not even compile.)
	auto result = task->get_future();

	auto* tbbNode = new (tbb::task::allocate_root()) LocalTBBTask([=]()
	{
		(*task)();
		delete task;
	});
	tbb::task::enqueue(*tbbNode);

	return result;
#elif defined(CUBBYFLOW_TASKING_CPP11THREAD)
	return std::async(std::launch::async, fn);
#else
	return std::async(std::launch::deferred, fn);
#endif
}
// Adopted from:
// Radenski, A.
// Shared Memory, Message Passing, and Hybrid Merge Sorts for Standalone and
// Clustered SMPs. Proc PDPTA'11, the 2011 International Conference on Parallel
// and Distributed Processing Techniques and Applications, CSREA Press
// (H. Arabnia, Ed.), 2011, pp. 367 - 373.
//!
//! \brief Merges the two sorted halves [0, size/2) and [size/2, size) of
//! \p a into \p temp using \p compareFunction, then copies the merged
//! sequence back into \p a in parallel.
//!
template <typename RandomIterator, typename RandomIterator2, typename CompareFunction>
void Merge(RandomIterator a, size_t size, RandomIterator2 temp, CompareFunction compareFunction)
{
	const size_t half = size / 2;
	size_t left = 0;
	size_t right = half;
	size_t out = 0;

	// Repeatedly take the "smaller" head element of the two runs.
	while (left < half && right < size)
	{
		if (compareFunction(a[left], a[right]))
		{
			temp[out++] = a[left++];
		}
		else
		{
			temp[out++] = a[right++];
		}
	}

	// Drain whichever run still has elements remaining.
	while (left < half)
	{
		temp[out++] = a[left++];
	}
	while (right < size)
	{
		temp[out++] = a[right++];
	}

	// Copy sorted temp array into main array, a
	ParallelFor(ZERO_SIZE, size, [&](size_t i)
	{
		a[i] = temp[i];
	});
}
template <typename RandomIterator, typename RandomIterator2, typename CompareFunction>
void ParallelMergeSort(RandomIterator a, size_t size, RandomIterator2 temp, unsigned int numThreads, CompareFunction compareFunction)
{
if (numThreads == 1)
{
std::sort(a, a + size, compareFunction);
}
else if (numThreads > 1)
{
std::vector<future<void>> pool;
pool.reserve(2);
auto launchRange = [compareFunction](RandomIterator begin, size_t k2, RandomIterator2 temp, unsigned int numThreads)
{
ParallelMergeSort(begin, k2, temp, numThreads, compareFunction);
};
pool.emplace_back(Internal::Async([=]()
{
launchRange(a, size / 2, temp, numThreads / 2);
}));
pool.emplace_back(Internal::Async([=]()
{
launchRange(a + size / 2, size - size / 2, temp + size / 2, numThreads - numThreads / 2);
}));
// Wait for jobs to finish
for (auto& f : pool)
{
if (f.valid())
{
f.wait();
}
}
Merge(a, size, temp, compareFunction);
}
}
} // namespace Internal
//!
//! \brief Assigns \p value to every element of [begin, end) using the
//! requested execution \p policy.  Empty or inverted ranges are a no-op.
//!
template <typename RandomIterator, typename T>
void ParallelFill(const RandomIterator& begin, const RandomIterator& end, const T& value, ExecutionPolicy policy)
{
	const auto count = end - begin;
	if (count <= 0)
	{
		return;
	}

#if defined(CUBBYFLOW_TASKING_HPX)
	hpx::parallel::fill(hpx::parallel::execution::par, begin, end, value);
#else
	// Delegate element assignment to the index-based parallel loop.
	ParallelFor(ZERO_SIZE, static_cast<size_t>(count), [begin, value](size_t i)
	{
		begin[i] = value;
	}, policy);
#endif
}
// Adopted from http://ideone.com/Z7zldb
//!
//! \brief Invokes \p function(i) for every index i in the half-open range
//! [beginIndex, endIndex), distributing iterations across the tasking
//! backend selected at compile time (TBB, HPX, C++11 threads, OpenMP, or
//! serial).  With ExecutionPolicy::Serial the loop always runs inline.
//!
template <typename IndexType, typename Function>
void ParallelFor(IndexType beginIndex, IndexType endIndex, const Function& function, ExecutionPolicy policy)
{
	// Empty or inverted range: nothing to do.
	if (beginIndex > endIndex)
	{
		return;
	}

	if (policy == ExecutionPolicy::Parallel)
	{
#if defined(CUBBYFLOW_TASKING_TBB)
		(void)policy;
		tbb::parallel_for(beginIndex, endIndex, function);
#elif defined(CUBBYFLOW_TASKING_HPX)
		(void)policy;
		hpx::parallel::for_loop(hpx::parallel::execution::par, beginIndex, endIndex, function);
#elif defined(CUBBYFLOW_TASKING_CPP11THREAD)
		// Estimate number of threads in the pool
		const unsigned int numThreadsHint = GetMaxNumberOfThreads();
		const unsigned int numThreads = (numThreadsHint == 0u) ? 8u : numThreadsHint;

		// Size of a slice for the range functions
		// NOTE(review): the half-open range holds endIndex - beginIndex
		// elements; the extra +1 only inflates this slice-size estimate.
		// Chunks are clamped to endIndex below, so no index is visited twice
		// or skipped -- TODO confirm the bias is intentional.
		IndexType n = endIndex - beginIndex + 1;
		IndexType slice = static_cast<IndexType>(std::round(n / static_cast<double>(numThreads)));
		slice = std::max(slice, IndexType(1));

		// [Helper] Inner loop: runs function over one contiguous chunk.
		auto launchRange = [&function](IndexType k1, IndexType k2)
		{
			for (IndexType k = k1; k < k2; ++k)
			{
				function(k);
			}
		};

		// Create pool and launch jobs; at most numThreads - 1 chunk threads
		// are spawned here, the tail chunk is launched after the loop.
		std::vector<std::thread> pool;
		pool.reserve(numThreads);

		IndexType i1 = beginIndex;
		IndexType i2 = std::min(beginIndex + slice, endIndex);

		for (unsigned int i = 0; i + 1 < numThreads && i1 < endIndex; ++i)
		{
			pool.emplace_back(launchRange, i1, i2);
			i1 = i2;
			i2 = std::min(i2 + slice, endIndex);
		}

		// Remaining tail chunk runs up to endIndex.
		if (i1 < endIndex)
		{
			pool.emplace_back(launchRange, i1, endIndex);
		}

		// Wait for jobs to finish
		for (std::thread& t : pool)
		{
			if (t.joinable())
			{
				t.join();
			}
		}
#else
		(void)policy;

#if defined(CUBBYFLOW_TASKING_OPENMP)
#pragma omp parallel for
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
		// MSVC's OpenMP requires a signed loop variable.
		for (ssize_t i = beginIndex; i < static_cast<ssize_t>(endIndex); ++i)
		{
#else // !MSVC || Intel
		for (auto i = beginIndex; i < endIndex; ++i)
		{
#endif // MSVC && !Intel
			function(i);
		}
#else // CUBBYFLOW_TASKING_SERIAL
		for (auto i = beginIndex; i < endIndex; ++i)
		{
			function(i);
		}
#endif // CUBBYFLOW_TASKING_OPENMP
#endif
	}
	else
	{
		// Serial policy: plain sequential loop.
		for (auto i = beginIndex; i < endIndex; ++i)
		{
			function(i);
		}
	}
}
//!
//! \brief Splits [beginIndex, endIndex) into contiguous chunks and invokes
//! \p function(chunkBegin, chunkEnd) for each chunk, possibly concurrently.
//! With ExecutionPolicy::Serial the whole range is handed to \p function in
//! one call.
//!
template <typename IndexType, typename Function>
void ParallelRangeFor(IndexType beginIndex, IndexType endIndex,
	const Function& function, ExecutionPolicy policy) {
	// Empty or inverted range: nothing to do.
	if (beginIndex > endIndex) {
		return;
	}

	if (policy == ExecutionPolicy::Parallel) {
#if defined(CUBBYFLOW_TASKING_TBB)
		tbb::parallel_for(
			tbb::blocked_range<IndexType>(beginIndex, endIndex),
			[&function](const tbb::blocked_range<IndexType>& range) {
				function(range.begin(), range.end());
			});
#else
		// Estimate number of threads in the pool
		const unsigned int numThreadsHint = GetMaxNumberOfThreads();
		const unsigned int numThreads =
			numThreadsHint == 0u ? 8u : numThreadsHint;

		// Size of a slice for the range functions
		// NOTE(review): the half-open range holds endIndex - beginIndex
		// elements; the +1 only biases this estimate upward.  Chunks are
		// clamped to endIndex below, so coverage is still exact.
		IndexType n = endIndex - beginIndex + 1;
		IndexType slice = static_cast<IndexType>(
			std::round(n / static_cast<double>(numThreads)));
		slice = std::max(slice, IndexType(1));

		// Create pool and launch jobs.  i1/i2 are captured by value, so each
		// task sees the bounds of its own chunk.
		std::vector<CubbyFlow::Internal::future<void>> pool;
		pool.reserve(numThreads);

		IndexType i1 = beginIndex;
		IndexType i2 = std::min(beginIndex + slice, endIndex);

		for (unsigned int i = 0; i + 1 < numThreads && i1 < endIndex; ++i) {
			pool.emplace_back(Internal::Async([=]() { function(i1, i2); }));
			i1 = i2;
			i2 = std::min(i2 + slice, endIndex);
		}

		// Tail chunk covers everything up to endIndex.
		if (i1 < endIndex) {
			pool.emplace_back(
				Internal::Async([=]() { function(i1, endIndex); }));
		}

		// Wait for jobs to finish
		for (auto& f : pool) {
			if (f.valid()) {
				f.wait();
			}
		}
#endif
	}
	else {
		// Serial policy: single call over the full range.
		function(beginIndex, endIndex);
	}
}
//!
//! \brief 2-D ParallelFor: invokes \p function(i, j) for every (i, j) in
//! [beginIndexX, endIndexX) x [beginIndexY, endIndexY).  Parallelism is
//! applied over the outer (Y) index; the inner (X) loop runs serially
//! inside each task.
//!
template <typename IndexType, typename Function>
void ParallelFor(
	IndexType beginIndexX, IndexType endIndexX,
	IndexType beginIndexY, IndexType endIndexY,
	const Function& function, ExecutionPolicy policy)
{
	ParallelFor(beginIndexY, endIndexY, [&](IndexType y)
	{
		for (IndexType x = beginIndexX; x < endIndexX; ++x)
		{
			function(x, y);
		}
	}, policy);
}
//!
//! \brief 2-D ParallelRangeFor: partitions the outer (Y) axis into chunks
//! and invokes \p function(beginIndexX, endIndexX, yBegin, yEnd) per chunk;
//! the X extent is forwarded untouched.
//!
template <typename IndexType, typename Function>
void ParallelRangeFor(
	IndexType beginIndexX, IndexType endIndexX,
	IndexType beginIndexY, IndexType endIndexY,
	const Function& function, ExecutionPolicy policy)
{
	ParallelRangeFor(beginIndexY, endIndexY, [&](IndexType yBegin, IndexType yEnd)
	{
		function(beginIndexX, endIndexX, yBegin, yEnd);
	}, policy);
}
//!
//! \brief 3-D ParallelFor: invokes \p function(i, j, k) over the box
//! [beginIndexX, endIndexX) x [beginIndexY, endIndexY) x
//! [beginIndexZ, endIndexZ).  Parallelism is applied over the outermost (Z)
//! index; the Y and X loops run serially inside each task.
//!
template <typename IndexType, typename Function>
void ParallelFor(
	IndexType beginIndexX, IndexType endIndexX,
	IndexType beginIndexY, IndexType endIndexY,
	IndexType beginIndexZ, IndexType endIndexZ,
	const Function& function, ExecutionPolicy policy)
{
	ParallelFor(beginIndexZ, endIndexZ, [&](IndexType z)
	{
		for (IndexType y = beginIndexY; y < endIndexY; ++y)
		{
			for (IndexType x = beginIndexX; x < endIndexX; ++x)
			{
				function(x, y, z);
			}
		}
	}, policy);
}
//!
//! \brief 3-D ParallelRangeFor: partitions the outermost (Z) axis into
//! chunks and invokes \p function(xBegin, xEnd, yBegin, yEnd, zBegin, zEnd)
//! per chunk; the X and Y extents are forwarded untouched.
//!
template <typename IndexType, typename Function>
void ParallelRangeFor(
	IndexType beginIndexX, IndexType endIndexX,
	IndexType beginIndexY, IndexType endIndexY,
	IndexType beginIndexZ, IndexType endIndexZ,
	const Function& function, ExecutionPolicy policy)
{
	ParallelRangeFor(beginIndexZ, endIndexZ, [&](IndexType zBegin, IndexType zEnd)
	{
		function(beginIndexX, endIndexX, beginIndexY, endIndexY, zBegin, zEnd);
	}, policy);
}
//!
//! \brief Reduces over [beginIndex, endIndex): each worker computes a
//! partial via \p function(chunkBegin, chunkEnd, identity), and the partials
//! are folded with \p reduce.  \p reduce must be compatible with the
//! chunking (associative, with \p identity as its neutral element) for the
//! parallel result to match the serial one.  Returns \p identity for an
//! empty range; with ExecutionPolicy::Serial, evaluates \p function once
//! over the whole range.
//!
template <typename IndexType, typename Value, typename Function, typename Reduce>
Value ParallelReduce(IndexType beginIndex, IndexType endIndex,
	const Value& identity, const Function& function, const Reduce& reduce, ExecutionPolicy policy)
{
	// Empty or inverted range: neutral element.
	if (beginIndex > endIndex)
	{
		return identity;
	}

	if (policy == ExecutionPolicy::Parallel)
	{
#if defined(CUBBYFLOW_TASKING_TBB)
		return tbb::parallel_reduce(
			tbb::blocked_range<IndexType>(beginIndex, endIndex), identity,
			[&function](const tbb::blocked_range<IndexType>& range, const Value& init)
			{
				return function(range.begin(), range.end(), init);
			}, reduce);
#else
		// Estimate number of threads in the pool
		const unsigned int numThreadsHint = GetMaxNumberOfThreads();
		const unsigned int numThreads = (numThreadsHint == 0u) ? 8u : numThreadsHint;

		// Size of a slice for the range functions
		// NOTE(review): the half-open range holds endIndex - beginIndex
		// elements; the +1 only biases this estimate (chunks are clamped to
		// endIndex below).
		IndexType n = endIndex - beginIndex + 1;
		IndexType slice = static_cast<IndexType>(std::round(n / static_cast<double>(numThreads)));
		slice = std::max(slice, IndexType(1));

		// Results
		// One partial result per worker, seeded with the identity element.
		std::vector<Value> results(numThreads, identity);

		// [Helper] Inner loop
		// launchRange holds 'results' by reference; this is safe because the
		// function waits on every future below before reading 'results'.
		auto launchRange = [&](IndexType k1, IndexType k2, unsigned int tid)
		{
			results[tid] = function(k1, k2, identity);
		};

		// Create pool and launch jobs; i1/i2/threadID are captured by value,
		// so each task sees its own chunk bounds and slot index.
		std::vector<CubbyFlow::Internal::future<void>> pool;
		pool.reserve(numThreads);

		IndexType i1 = beginIndex;
		IndexType i2 = std::min(beginIndex + slice, endIndex);
		unsigned int threadID = 0;

		for (; threadID + 1 < numThreads && i1 < endIndex; ++threadID)
		{
			pool.emplace_back(Internal::Async([=]()
			{
				launchRange(i1, i2, threadID);
			}));

			i1 = i2;
			i2 = std::min(i2 + slice, endIndex);
		}

		// Tail chunk covers everything up to endIndex.
		if (i1 < endIndex)
		{
			pool.emplace_back(Internal::Async([=]()
			{
				launchRange(i1, endIndex, threadID);
			}));
		}

		// Wait for jobs to finish
		for (auto& f : pool)
		{
			if (f.valid())
			{
				f.wait();
			}
		}

		// Gather
		// Fold the per-worker partials into the final value.
		Value finalResult = identity;
		for (const Value& val : results)
		{
			finalResult = reduce(val, finalResult);
		}

		return finalResult;
#endif
	}

	// Serial policy: single evaluation over the full range.
	(void)reduce;
	return function(beginIndex, endIndex, identity);
}
//!
//! \brief Sorts [begin, end) in ascending order (operator<) using the
//! requested execution \p policy.
//!
template <typename RandomIterator>
void ParallelSort(RandomIterator begin, RandomIterator end, ExecutionPolicy policy)
{
	using ValueType = typename std::iterator_traits<RandomIterator>::value_type;
	ParallelSort(begin, end, std::less<ValueType>(), policy);
}
//!
//! \brief Sorts [begin, end) with \p compareFunction.  Uses the backend's
//! native parallel sort when available (HPX/TBB), otherwise a parallel
//! merge sort with an O(n) scratch buffer; falls back to std::sort for the
//! serial policy.
//!
template <typename RandomIterator, typename CompareFunction>
void ParallelSort(RandomIterator begin, RandomIterator end, CompareFunction compareFunction, ExecutionPolicy policy)
{
	// Empty or inverted range: nothing to do.
	if (begin > end)
	{
		return;
	}

	if (policy == ExecutionPolicy::Parallel)
	{
#if defined(CUBBYFLOW_TASKING_HPX)
		hpx::parallel::sort(hpx::parallel::execution::par, begin, end, compareFunction);
#elif defined(CUBBYFLOW_TASKING_TBB)
		tbb::parallel_sort(begin, end, compareFunction);
#else
		size_t size = static_cast<size_t>(end - begin);

		using value_type = typename std::iterator_traits<RandomIterator>::value_type;
		// Scratch buffer for the merge phase, same length as the input.
		std::vector<value_type> temp(size);

		// Estimate number of threads in the pool
		const unsigned int numThreadsHint = GetMaxNumberOfThreads();
		const unsigned int numThreads = (numThreadsHint == 0u) ? 8u : numThreadsHint;

		Internal::ParallelMergeSort(begin, size, temp.begin(), numThreads, compareFunction);
#endif
	}
	else
	{
		std::sort(begin, end, compareFunction);
	}
}
} // namespace CubbyFlow
#endif
|
Vec.h | #ifndef VEC_H
#define VEC_H
/*
Szymon Rusinkiewicz
Princeton University
Vec.h
Class for a constant-length vector
Supports the following operations:
vec v1; // Initialized to (0, 0, 0)
vec v2(1.23f); // Initialized to (1.23f, 1.23f, 1.23f)
vec v3(1, 2, 3); // Initialized to (1, 2, 3)
vec v4(v3); // Copy constructor
float farray[3];
vec v5 = vec(farray); // Explicit: "v4 = farray" won't work
Vec<3,double> vd; // The "vec" used above is Vec<3,float>
point p1, p2, p3; // Same as vec
v3 = v1 + v2; // Also -, *, / (all componentwise)
v3 = 3.5f * v1; // Also vec * scalar, vec / scalar
// NOTE: scalar has to be the same type:
// it won't work to do double * vec<float>
v1 = min(v2, v3); // Componentwise min/max
v1 = sin(v2); // Componentwise - all the usual functions...
swap(v1, v2); // In-place swap
v3 = v1 DOT v2; // Actually operator^
v3 = v1 CROSS v2; // Actually operator%
float f = v1[0]; // Subscript
float *fp = v1; // Implicit conversion to float *
f = len(v1); // Length (also len2 == squared length)
f = dist(p1, p2); // Distance (also dist2 == squared distance)
normalize(v1); // Normalize (i.e., make it unit length)
// normalize(vec(0,0,0)) => vec(1,0,0)
v1 = trinorm(p1,p2,p3); // Normal of triangle (area-weighted)
cout << v1 << endl; // iostream output in the form (1,2,3)
cin >> v2; // iostream input using the same syntax
Also defines the utility functions sqr, cube, sgn, fract, clamp, mix,
step, smoothstep, faceforward, reflect, refract, and angle
*/
// Windows defines min and max as macros, which prevents us from using the
// type-safe versions from std::, as well as interfering with method defns.
// Also define NOMINMAX, which prevents future bad definitions.
#ifdef min
# undef min
#endif
#ifdef max
# undef max
#endif
#ifndef NOMINMAX
# define NOMINMAX
#endif
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <cstddef>
#include <cmath>
#include <iterator>
#include <stdexcept>
#include <iostream>
#include <algorithm>
// Let gcc optimize conditional branches a bit better...
#ifndef likely
# if !defined(__GNUC__) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
# define likely(x) (x)
# define unlikely(x) (x)
# else
# define likely(x) (__builtin_expect((x), 1))
# define unlikely(x) (__builtin_expect((x), 0))
# endif
#endif
namespace trimesh {
using ::std::size_t;
// Boost-like compile-time assertion checking
template <bool X> struct VEC_STATIC_ASSERTION_FAILURE;
template <> struct VEC_STATIC_ASSERTION_FAILURE<true>
	{ void operator () () {} };
#define VEC_STATIC_CHECK(expr) VEC_STATIC_ASSERTION_FAILURE<bool(expr)>()

// Vec class declaration
// A fixed-length, stack-allocated vector of D elements of type T, with
// componentwise arithmetic, std::array-like accessors/iterators, and a few
// valarray-style numeric helpers.
template <size_t D, class T = float>
class Vec {
public:
	// Types
	typedef T value_type;
	typedef value_type *pointer;
	typedef const value_type *const_pointer;
	typedef value_type &reference;
	typedef const value_type &const_reference;
	typedef value_type *iterator;
	typedef const value_type *const_iterator;
	typedef ::std::reverse_iterator<iterator> reverse_iterator;
	typedef ::std::reverse_iterator<const_iterator> const_reverse_iterator;
	typedef ::std::size_t size_type;
	typedef ::std::ptrdiff_t difference_type;

protected:
	// The internal representation: standard array
	T v[D];

public:
	// Constructor for no arguments.  Everything initialized to 0.
	Vec() { for (size_type i = 0; i < D; i++) v[i] = T(0); }

	// Uninitialized constructor - meant mostly for internal use
#define VEC_UNINITIALIZED ((void *) 0)
	Vec(void *) {}

	// Constructor for one argument - default value.  Explicit.
	explicit Vec(const T &x)
		{ for (size_type i = 0; i < D; i++) v[i] = x; }

	// Constructors for 2-4 arguments
	Vec(const T &x, const T &y)
		{ VEC_STATIC_CHECK(D == 2); v[0] = x; v[1] = y; }
	Vec(const T &x, const T &y, const T &z)
		{ VEC_STATIC_CHECK(D == 3); v[0] = x; v[1] = y; v[2] = z; }
	Vec(const T &x, const T &y, const T &z, const T &w)
		{ VEC_STATIC_CHECK(D == 4); v[0] = x; v[1] = y; v[2] = z; v[3] = w; }

	// Constructor from anything that can be accessed using []
	// Pretty aggressive, so marked as explicit.
	template <class S> explicit Vec(const S &x)
		{ for (size_type i = 0; i < D; i++) v[i] = x[i]; }

	// Using default copy constructor, assignment operator, and destructor

	// Array reference - no bounds checking
	reference operator [] (size_type i)
		{ return v[i]; }
	reference operator [] (int i)
		{ return v[i]; }
	const_reference operator [] (size_type i) const
		{ return v[i]; }
	const_reference operator [] (int i) const
		{ return v[i]; }

	// Array reference with bounds checking
	reference at(size_type i)
	{
		if (i >= D)
			throw ::std::out_of_range("Vec::at");
		return v[i];
	}
	const_reference at(size_type i) const
	{
		if (i >= D)
			throw ::std::out_of_range("Vec::at");
		return v[i];
	}

	// Other accessors, for compatibility with std::array
	reference front()
		{ return v[0]; }
	const_reference front() const
		{ return v[0]; }
	reference back()
		{ return v[D-1]; }
	const_reference back() const
		{ return v[D-1]; }

	// Conversion to pointer
	operator T * ()
		{ return v; }
	operator const T * ()
		{ return v; }
	operator const T * () const
		{ return v; }
	pointer data()
		{ return v; }
	const_pointer data() const
		{ return v; }

	// Iterators
	iterator begin()
		{ return v; }
	const_iterator begin() const
		{ return v; }
	const_iterator cbegin() const
		{ return v; }
	iterator end()
		{ return begin() + D; }
	const_iterator end() const
		{ return begin() + D; }
	const_iterator cend() const
		{ return begin() + D; }
	reverse_iterator rbegin()
		{ return reverse_iterator(end()); }
	const_reverse_iterator rbegin() const
		{ return const_reverse_iterator(end()); }
	const_reverse_iterator crbegin() const
		{ return const_reverse_iterator(end()); }
	reverse_iterator rend()
		{ return reverse_iterator(begin()); }
	const_reverse_iterator rend() const
		{ return const_reverse_iterator(begin()); }
	const_reverse_iterator crend() const
		{ return const_reverse_iterator(begin()); }

	// Capacity
	size_type size() const
		{ return D; }
	size_type max_size() const
		{ return D; }

	// empty() and clear() - check for all zero or set to zero
	// (Note: unlike std::array::empty, this tests element VALUES.)
	bool empty() const
	{
		for (size_type i = 0; i < D; i++)
			if (v[i]) return false;
		return true;
	}
	void clear()
		{ for (size_type i = 0; i < D; i++) v[i] = T(0); }

	// Set all elements to some constant
	void fill(const value_type &x)
	{
		for (size_type i = 0; i < D; i++)
			v[i] = x;
	}
	Vec<D,T> &operator = (const value_type &x)
	{
		for (size_type i = 0; i < D; i++)
			v[i] = x;
		return *this;
	}

	// Member operators
	// NOTE(review): the omp atomic pragmas only take effect when a caller
	// updates a shared Vec from inside an OpenMP parallel region.
	Vec<D,T> &operator += (const Vec<D,T> &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] += x[i];
		return *this;
	}
	Vec<D,T> &operator -= (const Vec<D,T> &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] -= x[i];
		return *this;
	}
	Vec<D,T> &operator *= (const Vec<D,T> &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] *= x[i];
		return *this;
	}
	Vec<D,T> &operator *= (const T &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] *= x;
		return *this;
	}
	Vec<D,T> &operator /= (const Vec<D,T> &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] /= x[i];
		return *this;
	}
	Vec<D,T> &operator /= (const T &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] /= x;
		return *this;
	}

	// Set each component to min/max of this and the other vector
	Vec<D,T> &min(const Vec<D,T> &x)
	{
#pragma omp critical
		for (size_type i = 0; i < D; i++)
			if (x[i] < v[i]) v[i] = x[i];
		return *this;
	}
	Vec<D,T> &max(const Vec<D,T> &x)
	{
#pragma omp critical
		for (size_type i = 0; i < D; i++)
			if (x[i] > v[i]) v[i] = x[i];
		return *this;
	}

	// Swap with another vector.  (Also exists as a global function.)
	void swap(Vec<D,T> &x)
	{
		using namespace ::std;
#pragma omp critical
		for (size_type i = 0; i < D; i++) swap(v[i], x[i]);
	}

	// Outside of class: + - * / % ^ << >> == != < > <= >=

	// Dot product with another vector (also exists as an operator)
	value_type dot(const Vec<D,T> &x) const
	{
		value_type total = v[0] * x[0];
		for (size_type i = 1; i < D; i++)
			total += v[i] * x[i];
		return total;
	}

	// Cross product with another vector (also exists as an operator)
	Vec<3,T> cross(const Vec<3,T> &x) const
	{
		VEC_STATIC_CHECK(D == 3);
		return Vec<3,T>(v[1]*x[2] - v[2]*x[1],
		                v[2]*x[0] - v[0]*x[2],
		                v[0]*x[1] - v[1]*x[0]);
	}

	// Some partial compatibility with std::valarray, plus generalizations
	value_type sum() const
	{
		value_type total = v[0];
		for (size_type i = 1; i < D; i++)
			total += v[i];
		return total;
	}
	value_type sumabs() const
	{
		using namespace ::std;
		value_type total = fabs(v[0]);
		for (size_type i = 1; i < D; i++)
			total += fabs(v[i]);
		return total;
	}
	value_type avg() const
		{ return sum() / D; }
	value_type mean() const
		{ return sum() / D; }
	value_type product() const
	{
		value_type total = v[0];
		for (size_type i = 1; i < D; i++)
			total *= v[i];
		return total;
	}
	value_type min() const
	{
		value_type m = v[0];
		for (size_type i = 1; i < D; i++)
			if (v[i] < m)
				m = v[i];
		return m;
	}
	value_type max() const
	{
		value_type m = v[0];
		for (size_type i = 1; i < D; i++)
			if (v[i] > m)
				m = v[i];
		return m;
	}
	Vec<D,T> apply(value_type func(value_type)) const
	{
		Vec<D,T> result(VEC_UNINITIALIZED);
		for (size_type i = 0; i < D; i++)
			result[i] = func(v[i]);
		return result;
	}
	Vec<D,T> apply(value_type func(const value_type&)) const
	{
		Vec<D,T> result(VEC_UNINITIALIZED);
		for (size_type i = 0; i < D; i++)
			result[i] = func(v[i]);
		return result;
	}
	// Circular shift: result[i] = v[(i+n) mod D], n may be negative.
	Vec<D,T> cshift(int n) const
	{
		Vec<D,T> result(VEC_UNINITIALIZED);
		// Normalize n into [0, D) using SIGNED arithmetic.  The previous
		// code computed (n % D) with D of type size_t, which converts a
		// negative n to a huge unsigned value and yields the wrong
		// remainder, so negative shifts silently returned the vector
		// unchanged (e.g. cshift(-1) with D == 3 gave n == 3).
		n %= static_cast<int>(D);
		if (n < 0)
			n += static_cast<int>(D);
		for (size_type i = 0; i < D; i++)
			result[i] = v[(i+n)%D];
		return result;
	}
	// Non-circular shift: vacated positions are zero-filled.
	Vec<D,T> shift(int n) const
	{
		using namespace ::std;
		if (abs(n) >= D)
			return Vec<D,T>();
		Vec<D,T> result; // Must be initialized to zero
		size_type start = n < 0 ? -n : 0;
		size_type stop = n > 0 ? D - n : D;
		for (size_type i = start; i < stop; i++)
			result[i] = v[i+n];
		return result;
	}

	// TODO for C++11: std::get()
}; // class Vec
// Shorthands for particular flavors of Vecs
typedef Vec<3,float> vec;    // the default vector type used throughout trimesh
typedef Vec<3,float> point;  // a point is representationally identical to a vec
typedef Vec<2,float> vec2;
typedef Vec<3,float> vec3;
typedef Vec<4,float> vec4;
typedef Vec<2,int> ivec2;    // integer-component variants
typedef Vec<3,int> ivec3;
typedef Vec<4,int> ivec4;
// Nonmember operators that take two Vecs
// All four are componentwise and return a new Vec of the same flavor.
template <size_t D, class T>
static inline const Vec<D,T> operator + (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (size_t k = 0; k < D; k++)
		out[k] = v1[k] + v2[k];
	return out;
}

template <size_t D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (size_t k = 0; k < D; k++)
		out[k] = v1[k] - v2[k];
	return out;
}

template <size_t D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (size_t k = 0; k < D; k++)
		out[k] = v1[k] * v2[k];
	return out;
}

template <size_t D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (size_t k = 0; k < D; k++)
		out[k] = v1[k] / v2[k];
	return out;
}
// Dot product
// Usually written as v1 DOT v2 via the macro below.
template <size_t D, class T>
static inline const T operator ^ (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	T total = v1[0] * v2[0];
	for (size_t k = 1; k < D; k++)
		total += v1[k] * v2[k];
	return total;
}
#define DOT ^

// Cross product - only in 3 dimensions
// Usually written as v1 CROSS v2 via the macro below.
template <class T>
static inline const Vec<3,T> operator % (const Vec<3,T> &v1, const Vec<3,T> &v2)
{
	// Standard right-handed cross product, written out componentwise.
	return Vec<3,T>(v1[1]*v2[2] - v1[2]*v2[1],
	                v1[2]*v2[0] - v1[0]*v2[2],
	                v1[0]*v2[1] - v1[1]*v2[0]);
}
#define CROSS %
// Component-wise equality and inequality (#include the usual caveats
// about comparing floats for equality...)
template <size_t D, class T>
static inline bool operator == (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	for (size_t k = 0; k < D; k++) {
		if (v1[k] != v2[k])
			return false;
	}
	return true;
}

template <size_t D, class T>
static inline bool operator != (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	for (size_t k = 0; k < D; k++) {
		if (v1[k] != v2[k])
			return true;
	}
	return false;
}

// Comparison by lexicographical ordering - not necessarily useful on its own,
// but necessary in order to put Vecs in sets, maps, etc.
template <size_t D, class T>
static inline bool operator < (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	// First differing component decides the ordering.
	for (size_t k = 0; k < D; k++) {
		if (v1[k] < v2[k])
			return true;
		if (v1[k] > v2[k])
			return false;
	}
	return false;
}

// The remaining orderings are all derived from operator <
template <size_t D, class T>
static inline bool operator > (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	return v2 < v1;
}

template <size_t D, class T>
static inline bool operator <= (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	return !(v2 < v1);
}

template <size_t D, class T>
static inline bool operator >= (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	return !(v1 < v2);
}
// Unary operators
// Unary plus is the identity (returned by reference, no copy).
template <size_t D, class T>
static inline const Vec<D,T> &operator + (const Vec<D,T> &v)
{
	return v;
}

// Componentwise negation.
template <size_t D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v)
{
	using namespace ::std;
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (size_t k = 0; k < D; k++)
		out[k] = -v[k];
	return out;
}

// Logical not: true iff every component is zero (see Vec::empty).
template <size_t D, class T>
static inline bool operator ! (const Vec<D,T> &v)
{
	return v.empty();
}
// Vec/scalar operators
// NOTE: the scalar must have exactly type T (e.g. a double scalar will not
// deduce against Vec<D,float>).
template <size_t D, class T>
static inline const Vec<D,T> operator * (const T &x, const Vec<D,T> &v)
{
	using namespace ::std;
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (size_t k = 0; k < D; k++)
		out[k] = x * v[k];
	return out;
}

template <size_t D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v, const T &x)
{
	using namespace ::std;
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (size_t k = 0; k < D; k++)
		out[k] = v[k] * x;
	return out;
}

// scalar / vec divides the scalar by each component.
template <size_t D, class T>
static inline const Vec<D,T> operator / (const T &x, const Vec<D,T> &v)
{
	using namespace ::std;
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (size_t k = 0; k < D; k++)
		out[k] = x / v[k];
	return out;
}

template <size_t D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v, const T &x)
{
	using namespace ::std;
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (size_t k = 0; k < D; k++)
		out[k] = v[k] / x;
	return out;
}
// iostream operators
// Output in the form (v0, v1, ..., vD-1)
template <size_t D, class T>
static inline ::std::ostream &operator << (::std::ostream &os, const Vec<D,T> &v)
{
	using namespace ::std;
	os << "(";
	for (size_t i = 0; i < D-1; i++)
		os << v[i] << ", ";
	return os << v[D-1] << ")";
}

// Input using the same syntax; accepts either (...) or [...] delimiters,
// with components separated by commas.
template <size_t D, class T>
static inline ::std::istream &operator >> (::std::istream &is, Vec<D,T> &v)
{
	using namespace ::std;
	char c1 = 0, c2 = 0;

	is >> c1;
	if (c1 == '(' || c1 == '[') {
		// After each component, c2 holds the following delimiter: ',' between
		// components, and the closing ')' or ']' after the last one.
		is >> v[0] >> ws >> c2;
		for (size_t i = 1; i < D; i++) {
			if (c2 == ',')
				is >> v[i] >> ws >> c2;
			else
				is.setstate(ios::failbit);
		}
	}
	// NOTE(review): if the first character is neither '(' nor '[', nothing is
	// parsed and neither check below fires, so the stream stays in a good
	// state with v unmodified -- TODO confirm this is intended.

	// The closing delimiter must match the opening one.
	if (c1 == '(' && c2 != ')')
		is.setstate(ios::failbit);
	else if (c1 == '[' && c2 != ']')
		is.setstate(ios::failbit);

	return is;
}
// Utility functions for square and cube, to go along with sqrt and cbrt
template <class T>
static inline T sqr(const T &x)
{
	return x * x;
}

template <class T>
static inline T cube(const T &x)
{
	return sqr(x) * x;
}

// Sign of a scalar.  Note that sgn(0) == 1.
template <class T>
static inline T sgn(const T &x)
{
	if (x < T(0))
		return T(-1);
	return T(1);
}

// Utility functions based on GLSL
// Fractional part: x - floor(x)
template <class T>
static inline T fract(const T &x)
{
	return x - floor(x);
}

// Clamp x to [a, b]
template <class T>
static inline T clamp(const T &x, const T &a, const T &b)
{
	// Comparison order is chosen so a NaN x falls through to a
	if (x > a)
		return (x < b) ? x : b;
	return a;
}

// Linear interpolation between x and y by fraction a
template <class T, class S>
static inline T mix(const T &x, const T &y, const S &a)
{
	return (S(1) - a) * x + a * y;
}

// 0 below the threshold a, 1 at or above it
template <class T>
static inline T step(const T &a, const T &x)
{
	return (x < a) ? T(0) : T(1);
}

// Hermite interpolation between a and b, clamped to [0, 1]
template <class T>
static inline T smoothstep(const T &a, const T &b, const T &x)
{
	if (b <= a)
		return step(x, a);
	T t = (x - a) / (b - a);
	if (t <= T(0))
		return T(0);
	if (t >= T(1))
		return T(1);
	return t * t * (T(3) - T(2) * t);
}
// GLSL-style geometric functions.
// BUGFIX: all three previously declared their return type as the scalar T,
// but each returns a full Vec<D,T> expression; since no Vec -> T conversion
// exists, any instantiation failed to compile.  Return types fixed to
// Vec<D,T>, matching the GLSL functions they mirror.

// Flip N so that it faces against the incident vector I (GLSL faceforward).
template <size_t D, class T>
static inline const Vec<D,T> faceforward(const Vec<D,T> &N, const Vec<D,T> &I,
                                         const Vec<D,T> &Nref)
{
	return ((Nref DOT I) < T(0)) ? N : -N;
}

// Reflection of I about N (GLSL reflect); N should be unit length.
template <size_t D, class T>
static inline const Vec<D,T> reflect(const Vec<D,T> &I, const Vec<D,T> &N)
{
	return I - (T(2) * (N DOT I)) * N;
}

// Refraction of I through a surface with normal N and index ratio eta
// (GLSL refract); I and N should be unit length.
template <size_t D, class T>
static inline const Vec<D,T> refract(const Vec<D,T> &I, const Vec<D,T> &N,
                                     const T &eta)
{
	using namespace ::std;
	T NdotI = N DOT I;
	T k = T(1) - sqr(eta) * (T(1) - sqr(NdotI));
	// k < 0 indicates total internal reflection: return the zero vector,
	// as GLSL refract does.
	return (k < T(0)) ? Vec<D,T>() : eta * I - (eta * NdotI * sqrt(k)) * N;
}
// Squared length
template <size_t D, class T>
static inline const T len2(const Vec<D,T> &v)
{
	using namespace ::std;
	T total = v[0] * v[0];
	for (size_t k = 1; k < D; k++)
		total += v[k] * v[k];
	return total;
}

// Length
template <size_t D, class T>
static inline const T len(const Vec<D,T> &v)
{
	using namespace ::std;
	return sqrt(len2(v));
}

// Squared distance
template <size_t D, class T>
static inline const T dist2(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	T total = sqr(v2[0] - v1[0]);
	for (size_t k = 1; k < D; k++)
		total += sqr(v2[k] - v1[k]);
	return total;
}

// Distance
template <size_t D, class T>
static inline const T dist(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	return sqrt(dist2(v1, v2));
}

// In-place normalization to unit length.
// A zero (or negative-length) input becomes the unit x vector rather than
// dividing by zero: normalize(vec(0,0,0)) => vec(1,0,0).
template <size_t D, class T>
static inline Vec<D,T> normalize(Vec<D,T> &v)
{
	using namespace ::std;
	T l = len(v);
	if (unlikely(l <= T(0))) {
		v[0] = T(1);
		for (size_t k = 1; k < D; k++)
			v[k] = T(0);
		return v;
	}

	l = T(1) / l;
	for (size_t k = 0; k < D; k++)
		v[k] *= l;
	return v;
}

// Area-weighted triangle face normal
template <class T>
static inline T trinorm(const T &v0, const T &v1, const T &v2)
{
	return (typename T::value_type) 0.5 * ((v1 - v0) CROSS (v2 - v0));
}

// Angle between two vectors
// NOTE(review): uses CROSS, which only exists for 3-component Vecs, so this
// template is only instantiable for D == 3 despite the general signature.
template <size_t D, class T>
static inline const T angle(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	return atan2(len(v1 CROSS v2), v1 DOT v2);
}
}; // namespace trimesh
/*
// POSIX / C99 compatibility functions for MSVS
#ifdef _WIN32
#ifdef cbrt
# undef cbrt
#endif
inline float cbrt(float x)
{
using namespace ::std;
return (x < 0.0f) ? -pow(-x, 1.0f / 3.0f) : pow(x, 1.0f / 3.0f);
}
inline double cbrt(double x)
{
using namespace ::std;
return (x < 0.0) ? -pow(-x, 1.0 / 3.0) : pow(x, 1.0 / 3.0);
}
inline long double cbrt(long double x)
{
using namespace ::std;
return (x < 0.0L) ? -pow(-x, 1.0L / 3.0L) : pow(x, 1.0L / 3.0L);
}
#ifdef round
# undef round
#endif
inline float round(float x)
{
return (x < 0.0f) ? float(int(x - 0.5f)) : float(int(x + 0.5f));
}
inline double round(double x)
{
return (x < 0.0f) ? double(int(x - 0.5)) : double(int(x + 0.5));
}
inline long double round(long double x)
{
return (x < 0.0f) ? (long double)(int(x - 0.5L)) : (long double)(int(x + 0.5L));
}
#ifdef trunc
# undef trunc
#endif
inline float trunc(float x)
{
return (x < 0.0f) ? float(int(x)) : float(int(x));
}
inline double trunc(double x)
{
return (x < 0.0f) ? double(int(x)) : double(int(x));
}
inline long double trunc(long double x)
{
return (x < 0.0f) ? (long double)(int(x)) : (long double)(int(x));
}
#endif // _WIN32
*/
// Generic macros for declaring 1-, 2-, and 3- argument
// componentwise functions on Vecs.
// Each macro expands to a function template that applies "name" to the
// component(s) of its Vec argument(s) and returns the resulting Vec.
// The result Vec is constructed uninitialized and filled component by
// component in the loop.
#define VEC_DECLARE_ONEARG(name) \
template < ::std::size_t D, class T > \
static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v) \
{ \
using namespace ::std; \
using namespace ::trimesh; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (size_t i = 0; i < D; i++) \
result[i] = name(v[i]); \
return result; \
}
// Vector-scalar, scalar-vector, and componentwise vector-vector versions
// name(v, a): applies name(v[i], a) to every component
#define VEC_DECLARE_TWOARG_VS(name) \
template < ::std::size_t D, class T > \
static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const T &a) \
{ \
using namespace ::std; \
using namespace ::trimesh; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (size_t i = 0; i < D; i++) \
result[i] = name(v[i], a); \
return result; \
}
// name(a, v): applies name(a, v[i]) to every component
#define VEC_DECLARE_TWOARG_SV(name) \
template < ::std::size_t D, class T > \
static inline trimesh::Vec<D,T> name(const T &a, const trimesh::Vec<D,T> &v) \
{ \
using namespace ::std; \
using namespace ::trimesh; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (size_t i = 0; i < D; i++) \
result[i] = name(a, v[i]); \
return result; \
}
// name(v, w): applies name(v[i], w[i]) pairwise
#define VEC_DECLARE_TWOARG_VV(name) \
template < ::std::size_t D, class T > \
static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const trimesh::Vec<D,T> &w) \
{ \
using namespace ::std; \
using namespace ::trimesh; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (size_t i = 0; i < D; i++) \
result[i] = name(v[i], w[i]); \
return result; \
}
// name(v, a, b): applies name(v[i], a, b) to every component
#define VEC_DECLARE_THREEARG_VSS(name) \
template < ::std::size_t D, class T > \
static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const T &a, const T &b) \
{ \
using namespace ::std; \
using namespace ::trimesh; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (size_t i = 0; i < D; i++) \
result[i] = name(v[i], a, b); \
return result; \
}
// name(a, b, v): applies name(a, b, v[i]) to every component
#define VEC_DECLARE_THREEARG_SSV(name) \
template < ::std::size_t D, class T > \
static inline trimesh::Vec<D,T> name(const T &a, const T &b, const trimesh::Vec<D,T> &v) \
{ \
using namespace ::std; \
using namespace ::trimesh; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (size_t i = 0; i < D; i++) \
result[i] = name(a, b, v[i]); \
return result; \
}
// name(v, w, x): applies name(v[i], w[i], x[i]) triple-wise
#define VEC_DECLARE_THREEARG_VVV(name) \
template < ::std::size_t D, class T > \
static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const trimesh::Vec<D,T> &w, const trimesh::Vec<D,T> &x) \
{ \
using namespace ::std; \
using namespace ::trimesh; \
Vec<D,T> result(VEC_UNINITIALIZED); \
for (size_t i = 0; i < D; i++) \
result[i] = name(v[i], w[i], x[i]); \
return result; \
}
// The following is the list of functions in C89 and C++98, with the exception
// of frexp, ldexp, and modf (which have irregular calling conventions).
// They are supposed to be in namespace std, but Visual Studio and some
// older compilers also declare them in the global namespace.
// In the name of compatibility, we (reluctantly) do likewise.
// Each line declares a componentwise Vec overload of the named function;
// fmod and pow get both scalar-argument and vector-vector variants.
VEC_DECLARE_ONEARG(acos)
VEC_DECLARE_ONEARG(asin)
VEC_DECLARE_ONEARG(atan)
VEC_DECLARE_TWOARG_VV(atan2)
VEC_DECLARE_ONEARG(ceil)
VEC_DECLARE_ONEARG(cos)
VEC_DECLARE_ONEARG(cosh)
VEC_DECLARE_ONEARG(exp)
VEC_DECLARE_ONEARG(fabs)
VEC_DECLARE_ONEARG(floor)
VEC_DECLARE_TWOARG_VS(fmod)
VEC_DECLARE_TWOARG_VV(fmod)
VEC_DECLARE_ONEARG(log)
VEC_DECLARE_ONEARG(log10)
VEC_DECLARE_TWOARG_VS(pow)
VEC_DECLARE_TWOARG_SV(pow)
VEC_DECLARE_TWOARG_VV(pow)
VEC_DECLARE_ONEARG(sin)
VEC_DECLARE_ONEARG(sinh)
VEC_DECLARE_ONEARG(sqrt)
VEC_DECLARE_ONEARG(tan)
VEC_DECLARE_ONEARG(tanh)
// Re-export the global-namespace math names (including the componentwise
// Vec overloads declared above) into namespace std, so both qualified and
// unqualified calls resolve consistently across compilers.
namespace std {
using ::acos;
using ::asin;
using ::atan;
using ::atan2;
using ::ceil;
using ::cos;
using ::cosh;
using ::exp;
using ::fabs;
using ::floor;
using ::fmod;
using ::log;
using ::log10;
using ::pow;
using ::sin;
using ::sinh;
using ::sqrt;
using ::tan;
using ::tanh;
};
// These are only in namespace std.
namespace std {
VEC_DECLARE_TWOARG_VS(min)
VEC_DECLARE_TWOARG_SV(min)
VEC_DECLARE_TWOARG_VV(min)
VEC_DECLARE_TWOARG_VS(max)
VEC_DECLARE_TWOARG_SV(max)
VEC_DECLARE_TWOARG_VV(max)
// Swap two Vecs. Not atomic, unlike class method.
// FIX: the parameters must be non-const references -- swap writes through
// both arguments, and element-wise swap on const references cannot modify
// (and will not compile for) the components.
template <size_t D, class T>
static inline void swap(::trimesh::Vec<D,T> &v1, ::trimesh::Vec<D,T> &v2)
{
	for (size_t i = 0; i < D; i++)
		swap(v1[i], v2[i]);
}
};
// These are POSIX and are commonly used. Global namespace.
// Compatibility versions of these for MSVC are above.
// (cbrt/round/trunc are C99/POSIX scalar functions; these lines add
// componentwise Vec overloads for them.)
VEC_DECLARE_ONEARG(cbrt)
VEC_DECLARE_ONEARG(round)
VEC_DECLARE_ONEARG(trunc)
// These are new functions declared in namespace trimesh.
// Componentwise Vec overloads of the trimesh scalar utilities
// (sqr, cube, sgn, fract, clamp, step, smoothstep).
namespace trimesh {
VEC_DECLARE_ONEARG(sqr)
VEC_DECLARE_ONEARG(cube)
VEC_DECLARE_ONEARG(sgn)
VEC_DECLARE_ONEARG(fract)
VEC_DECLARE_THREEARG_VSS(clamp)
VEC_DECLARE_THREEARG_VVV(clamp)
VEC_DECLARE_TWOARG_SV(step)
VEC_DECLARE_TWOARG_VV(step)
VEC_DECLARE_THREEARG_SSV(smoothstep)
VEC_DECLARE_THREEARG_VVV(smoothstep)
};
// The declaration macros are implementation details; remove them so they
// cannot leak into files that include this header.
#undef VEC_DECLARE_ONEARG
#undef VEC_DECLARE_TWOARG_VS
#undef VEC_DECLARE_TWOARG_SV
#undef VEC_DECLARE_TWOARG_VV
#undef VEC_DECLARE_THREEARG_VSS
#undef VEC_DECLARE_THREEARG_SSV
#undef VEC_DECLARE_THREEARG_VVV
// Both valarrays and GLSL use abs() on a vector to mean fabs().
// Let's do the same...
template < ::std::size_t D, class T >
static inline trimesh::Vec<D,T> abs(const trimesh::Vec<D,T> &v)
{
// Delegate to the componentwise fabs overload declared above
return fabs(v);
}
namespace std {
using ::abs;
};
#endif
|
plm.c | /*
* plmc
* Copyright (c) 2016, John Ingraham
* john.ingraham@gmail.com
*/
#include <stdlib.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>
#include <assert.h>
#include <string.h>
/* Optionally include OpenMP with the -fopenmp flag */
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "include/twister.h"
#include "include/plm.h"
#include "include/inference.h"
/* Usage pattern (printed to stderr when run with no arguments or -h/--help) */
const char *usage =
"plmc\n"
"\n"
"Usage:\n"
"  plm [options] alignmentfile\n"
"  plm -c couplingsfile alignmentfile\n"
"  plm -o paramfile -c couplingsfile alignmentfile\n"
"  plm [-h | --help]\n"
"  \n"
"    Required input:\n"
"      alignmentfile                    Multiple sequence alignment in FASTA format\n"
"\n"
"    Options, output:\n"
"      -c  --couplings  couplingsfile   Save coupling scores to file (text)\n"
"      -o  --output     paramfile       Save estimated parameters to file (binary)\n"
"\n"
"    Options, alignment processing:\n"
"      -s  --scale      <value>         Sequence weights: neighborhood weight [s > 0]\n"
"      -t  --theta      <value>         Sequence weights: neighborhood divergence [0 < t < 1]\n"
"\n"
"    Options, Maximum a posteriori estimation (L-BFGS, default):\n"
"      -lh --lambdah    <value>         Set L2 lambda for fields (h_i)\n"
"      -le --lambdae    <value>         Set L2 lambda for couplings (e_ij)\n"
"      -lg --lambdag    <value>         Set group L1 lambda for couplings (e_ij)\n"
"\n"
"    Options, general:\n"
"          --fast                       Fast weights and stochastic gradient descent\n"
"      -a  --alphabet   alphabet        Alternative character set to use for analysis\n"
"      -f  --focus      identifier      Select only uppercase, non-gapped sites from a focus sequence\n"
"      -g  --gapignore                  Model sequence likelihoods only by coding, non-gapped portions\n"
"      -m  --maxiter                    Maximum number of iterations\n"
"      -n  --ncores     [<number>|max]  Maximum number of threads to use in OpenMP\n"
"      -h  --help                       Usage\n"
"      -hw --heniweights weightsFile    Use repeats correction with henikoff phylogeny reweighting\n"
"      -rw --repeatweights weightsFile  Use repeats correction\n\n";
/* Internal functions to MSARead */
void MSAReadSeq(char *seq, FILE *fpAli);
letter_t MSAReadCode(char c, char *alphabet, int nCodes);
/* Global verbosity & profiling options */
int verbose = 2;
/* Reference amino acid indexing: gap character first, then the 20 amino acids */
const char *codesAA = "-ACDEFGHIKLMNPQRSTVWY";
/* Default parameters */
const numeric_t REGULARIZATION_LAMBDA_H = 0.01;     /* L2 lambda for fields h_i */
const numeric_t REGULARIZATION_LAMBDA_E = 100.0;    /* L2 lambda for couplings e_ij */
const numeric_t REGULARIZATION_LAMBDA_GROUP = 0.0;  /* group L1 lambda for couplings */
const numeric_t REWEIGHTING_THETA = 0.20;           /* neighborhood divergence cutoff */
const numeric_t REWEIGHTING_SCALE = 1.0;            /* samples per neighborhood */
const int ZERO_APC_PRIORS = 0;
const int SGD_BATCH_SIZE = 2048;                    /* minibatch size for --fast SGD */
const int REWEIGHTING_SAMPLES = 5000;
/* Program entry point: parse command-line options, read and preprocess the
   alignment, estimate the pairwise model parameters, and write the requested
   outputs.  The last command-line argument is always taken as the alignment
   file. */
int main(int argc, char **argv) {
char *alignFile = NULL;
char *outputFile = NULL;
char *couplingsFile = NULL;
char *repeatWeightsFile = NULL;
char *heniWeightsFile = NULL;
/* Default options */
options_t *options = (options_t *) malloc(sizeof(options_t));
options->lambdaH = REGULARIZATION_LAMBDA_H;
options->lambdaE = REGULARIZATION_LAMBDA_E;
options->lambdaGroup = REGULARIZATION_LAMBDA_GROUP;
options->scale = REWEIGHTING_SCALE;
options->zeroAPC = 0;
options->maxIter = 0;
options->usePairs = 1;
options->estimator = INFER_MAP;
options->estimatorMAP = INFER_MAP_PLM;
options->target = NULL;
options->alphabet = (char *) codesAA;
/* Sequence weight options */
options->fastWeights = 0;
options->theta = REWEIGHTING_THETA;
/* SGD options */
options->sgd = 0;
options->sgdBatchSize = SGD_BATCH_SIZE;
/* Initialize PRNG (fixed seed, so runs are reproducible) */
init_genrand(42);
/* Print usage if no arguments */
if (argc == 1) {
fprintf(stderr, "%s", usage);
exit(1);
}
/* Parse command line arguments.
   NOTE(review): every option test is guarded by (arg < argc-1), i.e. the
   option must not be the final argument (the final argument is always the
   alignment file).  As a side effect, valueless flags such as --fast, -g,
   -ee, or -i are silently ignored if they appear last on the command line. */
for (int arg = 1; arg < argc; arg++) {
if ((arg < argc-1) && (strcmp(argv[arg], "--output") == 0
|| strcmp(argv[arg], "-o") == 0)) {
outputFile = argv[++arg];
} else if ((arg < argc-1) && (strcmp(argv[arg], "--alphabet") == 0
|| strcmp(argv[arg], "-a") == 0)) {
options->alphabet = argv[++arg];
} else if ((arg < argc-1) && (strcmp(argv[arg], "--couplings") == 0
|| strcmp(argv[arg], "-c") == 0)) {
couplingsFile = argv[++arg];
} else if ((arg < argc-1) && (strcmp(argv[arg], "--lambdah") == 0
|| strcmp(argv[arg], "-lh") == 0)) {
options->lambdaH = atof(argv[++arg]);
} else if ((arg < argc-1) && (strcmp(argv[arg], "--lambdae") == 0
|| strcmp(argv[arg], "-le") == 0)) {
options->lambdaE = atof(argv[++arg]);
} else if ((arg < argc-1) && (strcmp(argv[arg], "--lambdag") == 0
|| strcmp(argv[arg], "-lg") == 0)) {
options->lambdaGroup = atof(argv[++arg]);
} else if ((arg < argc-1) && (strcmp(argv[arg], "--theta") == 0
|| strcmp(argv[arg], "-t") == 0)) {
options->theta = atof(argv[++arg]);
} else if ((arg < argc-1) && (strcmp(argv[arg], "--scale") == 0
|| strcmp(argv[arg], "-s") == 0)) {
options->scale = atof(argv[++arg]);
} else if ((arg < argc-1) && (strcmp(argv[arg], "--maxiter") == 0
|| strcmp(argv[arg], "-m") == 0)) {
options->maxIter = atoi(argv[++arg]);
} else if ((arg < argc-1) && (strcmp(argv[arg], "--independent") == 0
|| strcmp(argv[arg], "-i") == 0)) {
options->usePairs = 0;
fprintf(stderr, "Independent model not yet implemented\n");
exit(0);
} else if ((arg < argc-1) && (strcmp(argv[arg], "--gapreduce") == 0
|| strcmp(argv[arg], "-g") == 0)) {
options->estimatorMAP = INFER_MAP_PLM_GAPREDUCE;
} else if ((arg < argc-1) && (strcmp(argv[arg], "--estimatele") == 0
|| strcmp(argv[arg], "-ee") == 0)) {
options->zeroAPC = 1;
} else if ((arg < argc-1) && (strcmp(argv[arg], "--focus") == 0
|| strcmp(argv[arg], "-f") == 0)) {
options->target = argv[++arg];
} else if ((arg < argc-1) && (strcmp(argv[arg], "--repeatweights") == 0
|| strcmp(argv[arg], "-rw") == 0)) {
repeatWeightsFile = argv[++arg];
fprintf(stderr,"repeat weights method chosen!\n");
} else if ((arg < argc-1) && (strcmp(argv[arg], "--heniweights") == 0
|| strcmp(argv[arg], "-hw") == 0)) {
heniWeightsFile = argv[++arg];
} else if ((arg < argc-1) && strcmp(argv[arg], "--fast") == 0) {
options->sgd = 1;
options->fastWeights = 100;
} else if ((arg < argc-1) && (strcmp(argv[arg], "--ncores") == 0
|| strcmp(argv[arg], "-n") == 0)) {
/* Thread-count selection: clamp the request to [1, omp_get_max_threads()] */
#if defined(_OPENMP)
if (strcmp(argv[arg + 1], "max") == 0) {
int maxThreads = omp_get_max_threads();
/* Redundant, but serves as sanity check */
omp_set_num_threads(maxThreads);
fprintf(stderr, "OpenMP: Using %d of %d threads\n",
maxThreads, maxThreads);
} else {
int numThreads = atoi(argv[arg + 1]);
int maxThreads = omp_get_max_threads();
if (numThreads >= 1 && numThreads <= maxThreads) {
omp_set_num_threads(numThreads);
fprintf(stderr, "OpenMP: Using %d of %d threads\n",
numThreads, maxThreads);
} else if (numThreads > maxThreads) {
omp_set_num_threads(maxThreads);
fprintf(stderr, "OpenMP: More threads requested than "
"available. Using %d of %d threads instead.\n",
maxThreads, maxThreads);
} else {
/* Unparseable or < 1: fall back to a single thread */
omp_set_num_threads(1);
fprintf(stderr, "OpenMP: Using 1 of %d threads\n",
maxThreads);
}
}
arg++;
#else
fprintf(stderr, "Error (-n/--ncores) only available when "
"compiled with OpenMP\n");
exit(1);
#endif
} else if (strcmp(argv[arg], "--help") == 0
|| strcmp(argv[arg], "-h") == 0) {
fprintf(stderr, "%s", usage);
exit(1);
}
}
/* The alignment file is always the final argument */
alignFile = argv[argc - 1];
/* Read multiple sequence alignment */
alignment_t *ali = MSARead(alignFile, options);
if (heniWeightsFile == NULL) {
/* Reweight sequences by inverse neighborhood density */
MSAReweightSequences(repeatWeightsFile,ali, options);
} else {
/* Use simpler Henikoff weights */
fprintf(stderr,"Henikoff phylogeny reweighting chosen!!");
MSAReweightSequencesHenikoff(heniWeightsFile,ali,options);
}
/* Compute sitewise and pairwise marginal distributions */
MSACountMarginals(ali, options);
/* Infer model parameters */
numeric_t *x = InferPairModel(ali, options);
/* (Optionally) Output estimated parameters and coupling scores */
if (outputFile != NULL)
OutputParametersFull(outputFile, x, ali, options);
if (couplingsFile != NULL)
OutputCouplingScores(couplingsFile, x, ali, options);
/* Free alignment and options
   NOTE(review): options and x are never freed; reclaimed at process exit */
MSAFree(ali, options);
}
/* Read a FASTA alignment into a newly allocated alignment_t.
   Two passes over the file: the first validates structure and counts
   dimensions, the second encodes sequences via MSAReadCode.  Afterwards,
   rows with out-of-alphabet characters are dropped, and in focus mode
   (options->target set) only the focus sequence's uppercase/non-gap columns
   are kept and site offsets are mapped from the NAME/START-END header.
   NOTE(review): seq(s, i) below is presumably a function-like macro from
   plm.h indexing ali->sequences -- it does not conflict with the local
   char seq[] buffer because the macro only expands when followed by '('. */
alignment_t *MSARead(char *alignFile, options_t *options) {
/* Read FASTA-formatted alignment */
FILE *fpAli = NULL;
if (alignFile != NULL) {
fpAli = fopen(alignFile, "r");
} else {
fprintf(stderr, "Must specify alignment file: -a ALIGN_FILE\n");
exit(1);
}
if (fpAli == NULL) {
fprintf(stderr, "Error opening alignment file\n");
exit(1);
}
/* Allocate alignment */
alignment_t *ali = (alignment_t *) malloc(sizeof(alignment_t));
ali->nSeqs = ali->nSites = ali->nCodes = 0;
ali->alphabet = options->alphabet;
ali->names = NULL;
ali->sequences = NULL;
ali->target = -1;
ali->offsets = NULL;
ali->nEff = 0;
/* Ada added repeatWeights initialization */
ali->weights = ali->fi = ali->fij = NULL;
ali->nParams = 0;
/* Verify alignment dimensions and structure (first pass through file) */
char name[BUFFER_SIZE];
char seq[BUFFER_SIZE];
/* Read first line as name */
fgetstr(name, fpAli);
if (*name == '>') {
MSAReadSeq(seq, fpAli);
} else {
fprintf(stderr, "Error reading alignment:"
" First line should start with >\n");
exit(1);
}
/* Dimensions come from the alphabet and the first record */
ali->nCodes = strlen(ali->alphabet);
ali->nSites = strlen(seq);
ali->nSeqs = 1;
while (!feof(fpAli)) {
char c = fgetc(fpAli);
if (c == '>') {
/* Read name and sequence pair */
fgetstr(name, fpAli);
MSAReadSeq(seq, fpAli);
} else {
fprintf(stderr, "Error reading alignment:"
" sequence records should start with >\n");
exit(1);
}
/* Validate sequence length */
if (strlen(seq) != ali->nSites) {
fprintf(stderr,
"Incompatible sequence length (%lu should be %d) for %s:\n%s\n",
strlen(seq), ali->nSites, name, seq);
exit(1);
}
ali->nSeqs++;
}
/* Encode full alignment block (second pass through file) */
ali->sequences = (letter_t *)
malloc(ali->nSites * ali->nSeqs * sizeof(letter_t));
ali->names = (char **) malloc(ali->nSeqs * sizeof(char *));
for (int s = 0; s < ali->nSeqs; s++)
for (int i = 0; i < ali->nSites; i++) seq(s, i) = 0;
for (int s = 0; s < ali->nSeqs; s++) ali->names[s] = NULL;
rewind(fpAli);
for (int s = 0; s < ali->nSeqs; s++) {
/* >Name */
getc(fpAli);
fgetstr(name, fpAli);
ali->names[s] = (char *) malloc((strlen(name) + 1) * sizeof(char));
strcpy(ali->names[s], name);
/* Sequence */
MSAReadSeq(seq, fpAli);
for (int i = 0; i < ali->nSites; i++)
seq(s, i) = MSAReadCode(seq[i], ali->alphabet, ali->nCodes);
}
/* --------------------------------_DEBUG_--------------------------------*/
/* Alignment to stderr */
// for (int s = 0; s < 10; s++) {
// for (int s = 0; s < ali->nSeqs; s++) {
//     for (int i = 0; i < ali->nSites; i++)
//         if (seq(s, i) >= 0 && seq(s, i) < ali->nCodes) {
//             fprintf(stderr, "%c", ali->alphabet[seq(s, i)]);
//         } else if (seq(s, i) >= -ali->nCodes && seq(s, i) < 0) {
//             fprintf(stderr, "%c",
//                 tolower(ali->alphabet[seq(s, i) + ali->nCodes]));
//         } else {
//             fprintf(stderr, "*%d*", seq(s, i));
//         }
//     fprintf(stderr, "\n");
// }
// exit(0);
/* --------------------------------^DEBUG^--------------------------------*/
/* Focus mode: If a focus sequence (target) is provided, locate it by
   name prefix; on multiple matches the first match is kept */
if (options->target != NULL) {
for (int s = 0; s < ali->nSeqs; s++)
if (strncmp(options->target, ali->names[s],
strlen(options->target)) == 0) {
if (ali->target >= 0) {
fprintf(stderr,
"Multiple sequences start with %s, picking sequence %d\n",
options->target, s + 1);
} else {
ali->target = s;
}
}
if (ali->target >= 0) {
fprintf(stderr, "Found focus %s as sequence %d\n", options->target,
ali->target + 1);
} else {
fprintf(stderr,
"Could not find %s, proceeding without focus sequence\n",
options->target);
}
}
/* Always discard any sequences (rows) with out-of-alphabet characters.
   Valid codes are in [-nCodes, nCodes); MSAReadCode stores out-of-alphabet
   characters as exactly nCodes */
int* seqValid = (int *) malloc(ali->nSeqs * sizeof(int));
for (int s = 0; s < ali->nSeqs; s++) seqValid[s] = 0;
for (int s = 0; s < ali->nSeqs; s++)
for (int i = 0; i < ali->nSites; i++)
if ((seq(s, i) >= -ali->nCodes) && (seq(s, i) < ali->nCodes))
seqValid[s]++;
int nValidSeqs = 0;
for (int s = 0; s < ali->nSeqs; s++)
if (seqValid[s] == ali->nSites) nValidSeqs++;
fprintf(stderr, "%d valid sequences out of %d \n", nValidSeqs, ali->nSeqs);
/* Record indices of skipped sequences */
ali->nSkippedSeqs = ali->nSeqs - nValidSeqs;
ali->skippedSeqs = (int *) malloc(ali->nSkippedSeqs * sizeof(int));
for (int s = 0, skipIndex = 0; s < ali->nSeqs; s++)
if (seqValid[s] != ali->nSites) ali->skippedSeqs[skipIndex++] = s;
/* Focus mode: select only focus columns (criteria below) */
int nValidSites = ali->nSites;
int* siteValid = (int *) malloc(ali->nSites * sizeof(int));
for (int i = 0; i < ali->nSites; i++) siteValid[i] = 1;
if (ali->target >= 0) {
for (int i = 0; i < ali->nSites; i++) {
/* For proteins, remove lower case and gap columns */
if ((ali->alphabet == codesAA)
&& (seq(ali->target, i) < 0))
siteValid[i] = 0;
/* Discard gaps */
if ((ali->alphabet == codesAA)
|| (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE))
if (seq(ali->target, i) == 0) siteValid[i] = 0;
}
nValidSites = 0;
for (int i = 0; i < ali->nSites; i++)
if (siteValid[i] == 1) nValidSites++;
fprintf(stderr,
"%d sites out of %d\n", nValidSites, ali->nSites);
} else {
fprintf(stderr,
"%d sites\n", ali->nSites);
}
/* Focus mode: parse region (NAME/START_IX-END_IX) and map offsets */
int leftOffset = 0;
if (ali->target >= 0) {
char *focusName = ali->names[ali->target];
/* Name should be immediately followed by '/' */
if (strlen(focusName) > strlen(options->target) + 1
&& focusName[strlen(options->target)] == '/') {
/* Attempt to read integer region start */
int regLeft = strlen(options->target) + 1;
int ix = 0;
if (isdigit(focusName[regLeft])) {
/* Hand-rolled decimal parse; leftOffset ends up as (start - 1)
   so that offsets below become i + start */
while (regLeft + ix < strlen(focusName)
&& isdigit(focusName[regLeft + ix + 1])) ix++;
int tens = 1;
leftOffset = -1;
for (int i = ix; i >= 0; i--) {
leftOffset += tens * (focusName[regLeft + i] - '0');
tens *= 10;
}
fprintf(stderr, "Region starts at %d\n", leftOffset + 1);
} else {
fprintf(stderr, "Error parsing region, assuming start at 1");
}
}
/* Map the offsets */
ali->offsets = (int *) malloc(nValidSites * sizeof(int));
for (int i = 0; i < nValidSites; i++) ali->offsets[i] = i + 1;
int ix = 0;
for (int i = 0; i < ali->nSites; i++)
if (siteValid[i] == 1) {
ali->offsets[ix] = i + 1 + leftOffset;
ix++;
}
/* Reposition the target for reduced alignment */
int targetShift = -1;
for (int i = 0; i <= ali->target; i++)
if (seqValid[i] == ali->nSites) targetShift++;
ali->target = targetShift;
}
/* Copy only selected rows and columns */
if (nValidSeqs < ali->nSeqs || nValidSites < ali->nSites) {
letter_t *seqsReduced = (letter_t *)
malloc(nValidSites * nValidSeqs * sizeof(letter_t));
for (int i = 0; i < nValidSites * nValidSeqs; i++) seqsReduced[i] = 0;
int sx = 0;
for (int s = 0; s < ali->nSeqs; s++)
if (seqValid[s] == ali->nSites) {
int ix = 0;
for (int i = 0; i < ali->nSites; i++) {
if (siteValid[i] == 1) {
seqsReduced[ix + sx * nValidSites] = seq(s, i);
ix++;
}
}
sx++;
}
/* Reallocate alignment with reduced dimensions */
free(ali->sequences);
ali->nSeqs = nValidSeqs;
ali->nSites = nValidSites;
ali->sequences = (letter_t *)
malloc(nValidSites * nValidSeqs * sizeof(letter_t));
for (int i = 0; i < nValidSites * nValidSeqs; i++)
ali->sequences[i] = 0;
for (int s = 0; s < nValidSeqs; s++)
for (int i = 0; i < nValidSites; i++)
seq(s, i) = seqsReduced[i + s * nValidSites];
free(seqsReduced);
}
/* Shift any lowercase codes back to uppercase */
for (int s = 0; s < ali->nSeqs; s++)
for (int i = 0; i < ali->nSites; i++)
if (seq(s, i) < 0) seq(s, i) += ali->nCodes;
/* Initialize weights to 1.0 */
ali->weights = (numeric_t *) malloc(ali->nSeqs * sizeof(numeric_t));
for (int s = 0; s < ali->nSeqs; s++) ali->weights[s] = 1.0;
ali->nEff = (numeric_t) ali->nSeqs;
/* NOTE(review): fpAli is never fclosed, and seqValid/siteValid are never
   freed -- minor leaks for a run-once reader */
/* --------------------------------_DEBUG_--------------------------------*/
/* Display offset map */
// for (int i = 0; i < ali->nSites; i++) {
//     fprintf(stderr, "%d : %d : %c\n", i + 1, ali->offsets[i],
//         ali->alphabet[seq(ali->target, i)]);
// }
// exit(0);
/* Display target */
// for (int i = 0; i < ali->nSites; i++) {
//     fprintf(stderr, "%c", ali->alphabet[seq(ali->target, i)]);
// }
// fprintf(stderr, "\n");
// exit(0);
/* --------------------------------^DEBUG^--------------------------------*/
/* --------------------------------_DEBUG_--------------------------------*/
// for (int s = 0; s < ali->nSeqs; s++) {
//     fprintf(stderr, ">%s\n", ali->names[s]);
//     for (int i = 0; i < ali->nSites; i++)
//         fprintf(stderr, "%c", ali->alphabet[seq(s, i)]);
//     fprintf(stderr, "\n");
// }
/* --------------------------------^DEBUG^--------------------------------*/
return ali;
}
void MSAReadSeq(char *seq, FILE *fpAli) {
    /* Read a (possibly line-wrapped) sequence from the current file position,
       concatenating lines into seq until the next FASTA header ('>') or EOF.
       seq receives a NUL-terminated string.
       NOTE(review): there is no bounds check on the concatenation, so a
       wrapped sequence longer than seq's buffer (BUFFER_SIZE at the call
       sites) overflows -- confirm BUFFER_SIZE exceeds the maximum sequence
       length. */
    char buf[BUFFER_SIZE];
    /* FIX: fgetc returns int; keep the lookahead as int so the out-of-band
       EOF value is representable regardless of whether char is signed */
    int c = fgetc(fpAli);
    ungetc(c, fpAli);
    seq[0] = '\0';
    while (c != '>' && !feof(fpAli)) {
        fgetstr(buf, fpAli);
        strcat(seq, buf);
        /* Look ahead one character for the next record marker */
        c = fgetc(fpAli);
        ungetc(c, fpAli);
    }
}
letter_t MSAReadCode(char c, char *alphabet, int nCodes) {
/* Encode a character as an integer between -nCodes and +nCodes
    In alphabet:                     store index [0, nCodes - 1]
    Lowercase version of alphabet:   downshift by nCodes [-nCodes, -1]
    Out of alphabet:                 store nCodes [nCodes]
*/
letter_t i = 0;
/* Protein-specific treatment of '.': treat dots as gap characters */
if (alphabet == codesAA) if (c == '.') c = '-';
/* Case-insensitive scan; i stops at the match or at nCodes - 1 if none */
while ((i < nCodes - 1) && toupper(c) != alphabet[i]) i++;
/* Store lowercase characters as down-shifted by nCodes:
   c matched only after uppercasing, so c itself was lowercase */
if (c != alphabet[i] && toupper(c) == alphabet[i]) i -= nCodes;
/* Encode out-of-alphabet characters by [nCodes]
   (the i > 0 guard leaves a failed scan that stopped at index 0 encoded
   as 0 -- only possible when nCodes == 1) */
if (i > 0 && toupper(c) != alphabet[i]) i = nCodes;
return i;
}
void MSAReweightSequences(char *repeatWeightsFile,alignment_t *ali, options_t *options) {
/* Reweight sequences by their inverse neighborhood size.  Each sequence's
    weight is the inverse of the number of neighboring sequences with less
    than THETA percent divergence.  If repeatWeightsFile is given, per-sequence
    repeat-correction weights are multiplied in afterwards.  Sets ali->weights
    and ali->nEff (the sum of the weights).
*/
FILE *fpRepeatWeights = NULL;
for (int i = 0; i < ali->nSeqs; i++) ali->weights[i] = 1.0;
/* Only apply reweighting if theta is on [0,1] */
if (options->theta >= 0 && options->theta <= 1) {
/* The neighborhood size of each sequence is the number of sequences
    in the alignment within theta percent divergence */
if (options->fastWeights > 0 && options->fastWeights < ali->nSeqs) {
/* Approximate (--fast) path: cluster the sequences with k-consensus,
   then only compare sequences between sufficiently similar clusters */
int nClusters = options->fastWeights;
int nIterations = 10;
int nSeqs = ali->nSeqs;
int nCodes = ali->nCodes;
int nSites = ali->nSites;
/* NOTE(review): macro arguments are unparenthesized -- safe only for the
   simple identifier arguments used below */
#define COUNTS(i,j,a) counts[i * nSites * nCodes + j * nCodes + a]
#define CONSENSUS(i,j) consensus[i * nSites + j]
#define ALI(i,j) aliPermute[i * nSites + j]
/* Pick initial clusters with Reservoir sampling
   NOTE(review): classic reservoir sampling draws from [0, i]; here the
   modulus is i, not i + 1 -- confirm the off-by-one is intended */
int *clusters = (int *) malloc(nClusters * sizeof(int));
letter_t *consensus = (letter_t *) malloc(nClusters * nSites * sizeof(letter_t));
for (int i = 0; i < nClusters; i++) clusters[i] = i;
for (int i = nClusters; i < nSeqs; i++) {
int ix = genrand_int32() % (i);
if (ix < nClusters) clusters[ix] = i;
}
for (int i = 0; i < nClusters; i++)
for (int j = 0; j < nSites; j++)
CONSENSUS(i,j) = seq(clusters[i], j);
free(clusters);
/* EM steps */
int *assignment = (int *) malloc(nSeqs * sizeof(int));
int *counts = (int *) malloc(nClusters * nSites * nCodes * sizeof(int));
int *radii = (int *) malloc(nClusters * sizeof(int));
for (int i = 0; i < nSeqs; i++) assignment[i] = 0;
fprintf(stderr, "Clustering");
for (int t = 0; t < nIterations; t++) {
fprintf(stderr, ".");
/* Step 1. Update the assignments (Hamming distance to each consensus)
   NOTE(review): concurrent updates of radii[ixNew] race under OpenMP;
   radii is only used for the commented-out diagnostics below */
for (int i = 0; i < nClusters; i++) radii[i] = 0;
#pragma omp parallel for
for (int s = 0; s < nSeqs; s++) {
int ixOld = assignment[s];
/* Current distance to current assignment */
numeric_t distance = 0;
for (int j = 0; j < nSites; j++)
distance += (CONSENSUS(ixOld, j) != seq(s, j));
/* Find closest */
int ixNew = ixOld;
for (int i = 0; i < nClusters; i++) {
numeric_t distanceI = 0;
for (int j = 0; j < nSites; j++)
distanceI += (CONSENSUS(i, j) != seq(s, j));
if (distanceI < distance) {
ixNew = i;
distance = distanceI;
}
}
if (ixNew != ixOld) assignment[s] = ixNew;
if (radii[ixNew] < distance) radii[ixNew] = distance;
}
/* --------------------------_DEBUG_--------------------------*/
// for (int s = 0; s < nClusters; s++) {
//     int size = 0;
//     for (int i = 0; i < nSeqs; i++) size += (assignment[i] == s);
//     fprintf(stderr, ">Cluster %d, %d members, radius %d\n", s, size, radii[s]);
//     for (int i = 0; i < ali->nSites; i++)
//         if (CONSENSUS(s,i) >= 0) {
//             fprintf(stderr, "%c", ali->alphabet[CONSENSUS(s,i)]);
//         } else {
//             fprintf(stderr, " ");
//         }
//     fprintf(stderr, "\n");
// }
/* --------------------------^DEBUG^--------------------------*/
/* Step 2. Update the consensus sequences */
/* Update the counts (skipped on the last iteration) */
if (t < nIterations - 1) {
for (int i = 0; i < nClusters * nSites * nCodes; i++)
counts[i] = 0;
for (int s = 0; s < nSeqs; s++)
for (int j = 0; j < nSites; j++)
COUNTS(assignment[s], j, seq(s, j)) += 1;
/* New consensus: per-site majority code within each cluster */
#pragma omp parallel for
for (int i = 0; i < nClusters; i++)
for (int j = 0; j < nSites; j++) {
int topCode = 0;
int topCounts = COUNTS(i, j, 0);
for (int b = 1; b < nCodes; b++)
if (COUNTS(i, j, b) > topCounts) {
topCode = b;
topCounts = COUNTS(i, j, b);
}
CONSENSUS(i ,j) = topCode;
}
}
}
fprintf(stderr, "\n");
/* Profile-profile distances (number of identical consensus sites) */
numeric_t *clusterID = (numeric_t *) malloc(nClusters * nClusters * sizeof(numeric_t));
for (int i = 0; i < nClusters * nClusters; i++) clusterID[i] = 0;
#pragma omp parallel for
for (int pi = 0; pi < nClusters; pi++)
for (int pj = 0; pj < nClusters; pj++)
for (int j = 0; j < nSites; j++)
clusterID[pi + pj * nClusters] +=
(CONSENSUS(pi,j) == CONSENSUS(pj,j));
free(consensus);
free(counts);
/* Permute alignment so members of each cluster are contiguous */
int *clusterSizes = (int *) malloc(nClusters * sizeof(int));
int *clusterStart = (int *) malloc(nClusters * sizeof(int));
int *clusterEnd = (int *) malloc(nClusters * sizeof(int));
int *permuteMap = (int *) malloc(nSeqs * sizeof(int));
numeric_t *weightsP = (numeric_t *) malloc(nSeqs * sizeof(numeric_t));
letter_t *aliPermute = (letter_t *) malloc(nSeqs * nSites * sizeof(letter_t));
for (int i = 0; i < nClusters; i++) clusterSizes[i] = 0;
for (int s = 0; s < ali->nSeqs; s++)
clusterSizes[assignment[s]] += 1;
int ix = 0;
for (int i = 0; i < nClusters; i++) {
clusterStart[i] = ix;
ix += clusterSizes[i];
clusterEnd[i] = ix;
}
ix = 0;
for (int i = 0; i < nClusters; i++)
for (int s = 0; s < ali->nSeqs; s++)
if (assignment[s] == i) {
for (int j = 0; j < nSites; j++)
ALI(ix,j) = seq(s,j);
permuteMap[ix] = s;
ix++;
}
/* ----------------------------_DEBUG_----------------------------*/
// for (int s = 0; s < nSeqs; s++) {
//     fprintf(stdout, ">Seq %d\n", s);
//     for (int i = 0; i < ali->nSites; i++)
//         fprintf(stdout, "%c", ali->alphabet[ALI(s,i)]);
//     fprintf(stdout, "\n");
// }
/* ----------------------------^DEBUG^----------------------------*/
/* Sequence weights: count neighbors only across cluster pairs whose
   consensus identity is at least 90% of the cutoff */
numeric_t cutoff = (numeric_t) ((1 - options->theta) * ali->nSites);
for (int s = 0; s < nSeqs; s++) weightsP[s] = 1;
#pragma omp parallel for
for (int ci = 0; ci < nClusters; ci++)
for (int cj = 0; cj < nClusters; cj++)
if (clusterID[ci * nClusters + cj] >= 0.9 * cutoff)
for (int s = clusterStart[ci]; s < clusterEnd[ci]; s++)
for (int t = clusterStart[cj]; t < clusterEnd[cj]; t++)
if (s != t) {
int id = 0;
for (int n = 0; n < ali->nSites; n++)
id += (ALI(s, n) == ALI(t, n));
if (id >= cutoff) weightsP[s] += 1.0;
}
/* Scatter permuted weights back to original sequence order */
for (int s = 0; s < nSeqs; s++)
ali->weights[permuteMap[s]] = weightsP[s];
#undef COUNTS
#undef CONSENSUS
#undef ALI
/* NOTE(review): assignment and clusterID are never freed (leaks) */
free(clusterSizes);
free(clusterStart);
free(clusterEnd);
free(permuteMap);
free(weightsP);
free(radii);
free(aliPermute);
} else {
/* Deterministic sequence weights */
#if defined(_OPENMP)
/* Naive parallelization is faster ignoring symmetry */
#pragma omp parallel for
for (int s = 0; s < ali->nSeqs; s++)
for (int t = 0; t < ali->nSeqs; t++)
if (s != t) {
int id = 0;
for (int n = 0; n < ali->nSites; n++)
id += (seq(s, n) == seq(t, n));
if (id >= ((1 - options->theta) * ali->nSites))
ali->weights[s] += 1.0;
}
#else
/* For a single core, take advantage of symmetry */
for (int s = 0; s < ali->nSeqs - 1; s++)
for (int t = s + 1; t < ali->nSeqs; t++) {
int id = 0;
for (int n = 0; n < ali->nSites; n++)
id += (seq(s, n) == seq(t, n));
if (id >= ((1 - options->theta) * ali->nSites)) {
ali->weights[s] += 1.0;
ali->weights[t] += 1.0;
}
}
#endif
}
/* Reweight sequences by the inverse of the neighborhood size */
for (int i = 0; i < ali->nSeqs; i++)
ali->weights[i] = 1.0 / ali->weights[i];
}
/* Scale sets the effective number of samples per neighborhood */
for (int i = 0; i < ali->nSeqs; i++) {
ali->weights[i] *= options->scale;
}
/*--------------------_DEBUG_---------------------*/
//for (int i = 0; i < ali->nSeqs; i++) {
//    fprintf(stderr,"%lf",ali->weights[i]);
//}
//exit(1);
/*--------------------^DEBUG^--------------------*/
numeric_t *weightsratio = (numeric_t *) malloc(ali->nSeqs * sizeof(numeric_t));
/* The effective number of sequences is then the sum of the weights;
   weightsratio snapshots the pre-repeat-correction weights */
ali->nEff = 0;
for (int i = 0; i < ali->nSeqs; i++) {
ali->nEff += ali->weights[i];
weightsratio[i] = ali->weights[i];
}
/* Ada repeat weights multiplication: read one weight per line and multiply
   into ali->weights.
   NOTE(review): fopen result is unchecked and fpRepeatWeights is never
   fclosed; sscanf(...) != EOF also accepts lines with zero conversions
   (return 0), so j can advance past unwritten entries; j is not bounded by
   nSeqs; and %lf assumes numeric_t is double -- confirm against plm.h */
if (repeatWeightsFile!=NULL) {
fprintf(stderr,"repeats file exists\n");
fpRepeatWeights = fopen(repeatWeightsFile,"r");
char *line = (char *) malloc(100 * sizeof(char));
fprintf(stderr,"memory allocated for repeat weights lines\n");
int j = 0;
numeric_t *repeatWeights = (numeric_t *) malloc(ali->nSeqs * sizeof(numeric_t));
fprintf(stderr,"memory allocated for repeat weights\n");
while(fgets(line,100,fpRepeatWeights)) {
if(sscanf(line,"%lf",&repeatWeights[j])!=EOF) {
++j;
}
/* --------------------_DEBUG_------------------*/
//fprintf(stderr,"repeat weights: %lf \n",repeatWeights[j]);
//fprintf(stderr,"file input line: %s \n", line);
/* --------------------^DEBUG^------------------*/
}
fprintf(stderr,"repeats while loop success!\n");
for (int i = 0; i < ali->nSeqs; i++) {
ali->weights[i] *= repeatWeights[i];
}
fprintf(stderr,"repeats multiplication successful. process did not fail!\n");
free(repeatWeights);
free(line);
}
/* NOTE(review): nEff reported below was computed before the repeat-weight
   multiplication and is not updated afterwards */
if (options->theta >= 0 && options->theta <= 1) {
fprintf(stderr,
"Effective number of samples: %.1f\t(%.0f%% identical neighborhood = %.3f samples)\n",
ali->nEff, 100 * (1 - options->theta), options->scale);
} else {
fprintf(stderr,
"Theta not between 0 and 1, no sequence reweighting applied (N = %.2f)\n",
ali->nEff);
}
/*--------------------_DEBUG_---------------------*/
/* NOTE(review): this diagnostic loop is left enabled and prints one line
   per sequence (the repeat-correction ratio) on every run */
for (int i = 0; i < ali->nSeqs; i++) {
fprintf(stderr,"%f\n",ali->weights[i]/weightsratio[i]);
}
//exit(1);
/*--------------------^DEBUG^--------------------*/
free(weightsratio);
}
void MSAReweightSequencesHenikoff(char *heniWeightsFile, alignment_t *ali, options_t *options) {
    /* Replaces the per-sequence weights with values read from
       HENIWEIGHTSFILE (one numeric value per line, in alignment order) and
       recomputes the effective sample size ali->nEff as the sum of the
       weights. If HENIWEIGHTSFILE is NULL, all weights are left at 1.0.
       NOTE(review): OPTIONS is currently unused; kept for interface parity
       with the other reweighting routines. */
    FILE *fpHeniWeights = NULL;
    for (int i = 0; i < ali->nSeqs; i++) ali->weights[i] = 1.0;
    if (heniWeightsFile != NULL) {
        fpHeniWeights = fopen(heniWeightsFile, "r");
        if (fpHeniWeights == NULL) {
            /* Previously the fopen result was unchecked and a missing file
               crashed inside fgets */
            fprintf(stderr, "Unable to open weights file %s\n", heniWeightsFile);
            exit(1);
        }
        char *line = (char *) malloc(100 * sizeof(char));
        numeric_t *repeatWeights = (numeric_t *) malloc(ali->nSeqs * sizeof(numeric_t));
        /* Default every weight to 1.0 so a file with fewer lines than
           sequences no longer multiplies in uninitialized memory */
        for (int i = 0; i < ali->nSeqs; i++) repeatWeights[i] = 1.0;
        int j = 0;
        /* Bound j by nSeqs (extra lines previously overran the buffer) and
           require sscanf to report one converted item: the old `!= EOF`
           test also advanced j on lines that matched nothing */
        while (j < ali->nSeqs && fgets(line, 100, fpHeniWeights)) {
            if (sscanf(line, "%lf", &repeatWeights[j]) == 1) {
                ++j;
            }
        }
        fclose(fpHeniWeights);  /* previously leaked */
        for (int i = 0; i < ali->nSeqs; i++) {
            ali->weights[i] *= repeatWeights[i];
        }
        free(repeatWeights);
        free(line);
    }
    /* The effective number of sequences is then the sum of the weights */
    ali->nEff = 0;
    for (int i = 0; i < ali->nSeqs; i++) ali->nEff += ali->weights[i];
}
void MSACountMarginals(alignment_t *ali, options_t *options) {
    /* Compute first and second order marginal distributions, according to the
       sequence weights.
       Allocates and fills ali->fi (nSites x nCodes) and ali->fij (one
       nCodes x nCodes block per site pair i < j) through the fi()/fij()
       accessor macros; seq(s, i) reads the encoded residue of sequence s at
       site i (macros defined elsewhere in this file).
    */
    if (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE) {
        /* Condition the marginals on ungapped: code 0 (the gap) is dropped,
           the code count shrinks by one, and each site/pair distribution is
           renormalized over the residues actually observed */
        ali->nCodes = strlen(ali->alphabet) - 1;
        /* First-order marginals P_i(Ai) */
        int nFi = ali->nSites * ali->nCodes;
        ali->fi = (numeric_t *) malloc(nFi * sizeof(numeric_t));
        for (int i = 0; i < nFi; i++) ali->fi[i] = 0.0;
        for (int s = 0; s < ali->nSeqs; s++)
            for (int i = 0; i < ali->nSites; i++)
                if (seq(s, i) > 0)
                    /* shift codes down by one to skip the gap symbol */
                    fi(i, seq(s, i) - 1) += ali->weights[s];
        /* Second-order marginals P_ij(Ai, Aj) */
        int nFij = ali->nSites * (ali->nSites - 1) / 2 * ali->nCodes * ali->nCodes;
        ali->fij = (numeric_t *) malloc(nFij * sizeof(numeric_t));
        for (int i = 0; i < nFij; i++) ali->fij[i] = 0.0;
        for (int s = 0; s < ali->nSeqs; s++)
            for (int i = 0; i < ali->nSites - 1; i++)
                for (int j = i + 1; j < ali->nSites; j++)
                    /* a pair contributes only when both sites are ungapped */
                    if (seq(s, i) > 0) if(seq(s, j) > 0)
                        fij(i, j, seq(s, i) - 1, seq(s, j) - 1)
                            += ali->weights[s];
        /* Normalize conditional distributions */
        for (int i = 0; i < ali->nSites; i++) {
            double fsum = 0.0;
            for (int ai = 0; ai < ali->nCodes; ai++)
                fsum += fi(i, ai);
            if (fsum != 0) {
                double fsumInv = 1.0 / fsum;
                for (int ai = 0; ai < ali->nCodes; ai++)
                    fi(i, ai) *= fsumInv;
            } else {
                /* Handle empty columns: fall back to a flat distribution */
                numeric_t flatF = 1.0 / ((numeric_t) ali->nCodes);
                for (int ai = 0; ai < ali->nCodes; ai++)
                    fi(i, ai) = flatF;
            }
        }
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++) {
                double fsum = 0.0;
                for (int ai = 0; ai < ali->nCodes; ai++)
                    for (int aj = 0; aj < ali->nCodes; aj++)
                        fsum += fij(i, j, ai, aj);
                if (fsum != 0) {
                    double fsumInv = 1.0 / fsum;
                    for (int ai = 0; ai < ali->nCodes; ai++)
                        for (int aj = 0; aj < ali->nCodes; aj++)
                            fij(i, j, ai, aj) *= fsumInv;
                } else {
                    /* Handle pairs of empty columns: flat joint distribution */
                    numeric_t flatF = 1.0 / ((numeric_t) (ali->nCodes * ali->nCodes));
                    for (int ai = 0; ai < ali->nCodes; ai++)
                        for (int aj = 0; aj < ali->nCodes; aj++)
                            fij(i, j, ai, aj) = flatF;
                }
            }
    } else {
        /* Compute regular marginals, normalized by the effective sample
           size (the weights sum to nEff, so scaling each contribution by
           1/nEff yields probabilities) */
        numeric_t Zinv = 1.0 / ali->nEff;
        /* First-order marginals P_i(Ai) */
        int nFi = ali->nSites * ali->nCodes;
        ali->fi = (numeric_t *) malloc(nFi * sizeof(numeric_t));
        for (int i = 0; i < nFi; i++) ali->fi[i] = 0.0;
        for (int s = 0; s < ali->nSeqs; s++)
            for (int i = 0; i < ali->nSites; i++)
                fi(i, seq(s, i)) += ali->weights[s] * Zinv;
        /* Second-order marginals P_ij(Ai, Aj) */
        int nFij = ali->nSites * (ali->nSites - 1) / 2 * ali->nCodes * ali->nCodes;
        ali->fij = (numeric_t *) malloc(nFij * sizeof(numeric_t));
        for (int i = 0; i < nFij; i++) ali->fij[i] = 0.0;
        for (int s = 0; s < ali->nSeqs; s++)
            for (int i = 0; i < ali->nSites - 1; i++)
                for (int j = i + 1; j < ali->nSites; j++)
                    fij(i, j, seq(s, i), seq(s, j)) += ali->weights[s] * Zinv;
    }
}
void MSAFree(alignment_t *ali, options_t *options) {
    /* Release all heap storage owned by the alignment, then the options
       struct itself */
    if (ali->names != NULL && ali->names[0] != NULL) {
        for (int s = 0; s < ali->nSeqs; s++) {
            free(ali->names[s]);
        }
    }
    free(ali->names);
    free(ali->sequences);
    free(ali->weights);
    free(ali->fi);
    free(ali->fij);
    /* Note: options->target and options->alphabet are never allocated */
    free(options);
}
#define OUTPUT_PRECISION float
void OutputParametersSite(char *outputFile, const numeric_t *x,
    alignment_t *ali) {
    /* Writes the sitewise parameter file as a fixed sequence of binary
       records: (1) nSites, (2) target sequence (or a column of the first
       alphabet character outside focus mode), (3) offset map (or 1-based
       indices), (4,5) sitewise marginals fi written twice, (6) sitewise
       fields hi read from x via the xHi macro. Exits on open failure. */
    FILE *fpOutput = NULL;
    /* "wb": the contents are raw fwrite records, so the stream must be in
       binary mode (text-mode newline translation corrupts it on Windows) */
    fpOutput = fopen(outputFile, "wb");
    if (fpOutput != NULL) {
        /* 1: nSites */
        fwrite(&(ali->nSites), sizeof(ali->nSites), 1, fpOutput);
        /* 2: (Focus mode only) target sequence */
        if (ali->target >= 0) {
            for (int i = 0; i < ali->nSites; i++) {
                char c = (char) ali->alphabet[seq(ali->target, i)];
                fwrite(&c, sizeof(char), 1, fpOutput);
            }
        } else {
            char c = ali->alphabet[0];
            for (int i = 0; i < ali->nSites; i++)
                fwrite(&c, sizeof(c), 1, fpOutput);
        }
        /* 3: (Focus mode only) offset map */
        if (ali->target >= 0) {
            for (int i = 0; i < ali->nSites; i++) {
                int ix = ali->offsets[i];
                fwrite(&ix, sizeof(ix), 1, fpOutput);
            }
        } else {
            for (int i = 0; i < ali->nSites; i++) {
                int ix = i + 1;
                fwrite(&ix, sizeof(ix), 1, fpOutput);
            }
        }
        /* 4,5: sitewise marginals fi, twice.
           The repetition counter was renamed from `x` to `rep` so it no
           longer shadows the parameter x that the xHi macro reads below. */
        for (int rep = 0; rep < 2; rep++)
            for (int i = 0; i < ali->nSites; i++)
                for (int ai = 0; ai < ali->nCodes; ai++) {
                    OUTPUT_PRECISION f = (OUTPUT_PRECISION) fi(i, ai);
                    fwrite(&f, sizeof(f), 1, fpOutput);
                }
        /* 6: sitewise parameters hi */
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++) {
                OUTPUT_PRECISION h = (OUTPUT_PRECISION) xHi(i, ai);
                fwrite(&h, sizeof(h), 1, fpOutput);
            }
        fclose(fpOutput);
    } else {
        fprintf(stderr, "Error writing parameters\n");
        exit(1);
    }
}
void OutputParametersFull(char *outputFile, const numeric_t *x,
    alignment_t *ali, options_t *options) {
    /* File format: a fixed sequence of binary records, numbered in the
       comments below. Integer fields are written as fixed-width int32/int8
       and floating-point fields in OUTPUT_PRECISION. Exits on open failure. */
    FILE *fpOutput = NULL;
    /* "wb": output is raw fwrite records, so the stream must be in binary
       mode (text-mode newline translation corrupts it on Windows) */
    fpOutput = fopen(outputFile, "wb");
    if (fpOutput != NULL) {
        /* 1: nSites */
        int32_t nSites = (int32_t) ali->nSites;
        fwrite(&nSites, sizeof(nSites), 1, fpOutput);
        /* 2: nCodes */
        int32_t nCodes = (int32_t) ali->nCodes;
        fwrite(&nCodes, sizeof(nCodes), 1, fpOutput);
        /* 3: nSeqs */
        int32_t nSeqs = (int32_t) ali->nSeqs;
        fwrite(&nSeqs, sizeof(nSeqs), 1, fpOutput);
        /* 4: nSkippedSeqs */
        int32_t nSkippedSeqs = (int32_t) ali->nSkippedSeqs;
        fwrite(&nSkippedSeqs, sizeof(nSkippedSeqs), 1, fpOutput);
        /* 5: number of iterations */
        int32_t maxIter = (int32_t) options->maxIter;
        fwrite(&maxIter, sizeof(maxIter), 1, fpOutput);
        /* 6: theta */
        OUTPUT_PRECISION theta = (OUTPUT_PRECISION) options->theta;
        fwrite(&theta, sizeof(theta), 1, fpOutput);
        /* 7: lambda for fields (lh) */
        OUTPUT_PRECISION lh = (OUTPUT_PRECISION) options->lambdaH;
        fwrite(&lh, sizeof(lh), 1, fpOutput);
        /* 8: lambda for couplings (le) */
        OUTPUT_PRECISION le = (OUTPUT_PRECISION) options->lambdaE;
        fwrite(&le, sizeof(le), 1, fpOutput);
        /* 9: group lambda for couplings (lg) */
        OUTPUT_PRECISION lg = (OUTPUT_PRECISION) options->lambdaGroup;
        fwrite(&lg, sizeof(lg), 1, fpOutput);
        /* 10: effective sample size (nEff) */
        OUTPUT_PRECISION nEff = (OUTPUT_PRECISION) ali->nEff;
        fwrite(&nEff, sizeof(nEff), 1, fpOutput);
        /* 11: alphabet (skip the leading gap character in gap-reduced mode) */
        int isGapped = (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE);
        for (int i = 0; i < ali->nCodes; i++) {
            int8_t letter = (int8_t) ali->alphabet[i + isGapped];
            fwrite(&letter, sizeof(letter), 1, fpOutput);
        }
        /* 12: sequence number of neighbors (self included); skipped
           sequences are emitted as 0 to keep one record per input sequence */
        int skipix = 0, reducedix = 0;
        for (int s = 0; s < ali->nSeqs + ali->nSkippedSeqs; s++) {
            if (skipix < ali->nSkippedSeqs && s == ali->skippedSeqs[skipix]) {
                /* Skip skipped sequences */
                OUTPUT_PRECISION w = (OUTPUT_PRECISION) 0;
                fwrite(&w, sizeof(w), 1, fpOutput);
                skipix++;
            } else {
                numeric_t nNeighbors = ali->weights[reducedix];
                nNeighbors = 1.0 / (nNeighbors * options->scale);
                OUTPUT_PRECISION w = (OUTPUT_PRECISION) nNeighbors;
                fwrite(&w, sizeof(w), 1, fpOutput);
                reducedix++;
            }
        }
        /* 13: (Focus mode) target sequence */
        if (ali->target >= 0) {
            for (int i = 0; i < ali->nSites; i++) {
                int8_t c = (int8_t) ali->alphabet[seq(ali->target, i)];
                fwrite(&c, sizeof(c), 1, fpOutput);
            }
        } else {
            int8_t c = (int8_t) ali->alphabet[0];
            for (int i = 0; i < ali->nSites; i++)
                fwrite(&c, sizeof(c), 1, fpOutput);
        }
        /* 14: (Focus mode) offset map */
        if (ali->target >= 0) {
            for (int i = 0; i < ali->nSites; i++) {
                int32_t ix = (int32_t) ali->offsets[i];
                fwrite(&ix, sizeof(ix), 1, fpOutput);
            }
        } else {
            for (int i = 0; i < ali->nSites; i++) {
                int32_t ix = (int32_t) i + 1;
                fwrite(&ix, sizeof(ix), 1, fpOutput);
            }
        }
        /* 15: sitewise marginals fi */
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++) {
                OUTPUT_PRECISION f = (OUTPUT_PRECISION) fi(i, ai);
                fwrite(&f, sizeof(f), 1, fpOutput);
            }
        /* 16: sitewise parameters hi */
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++) {
                OUTPUT_PRECISION h = (OUTPUT_PRECISION) xHi(i, ai);
                fwrite(&h, sizeof(h), 1, fpOutput);
            }
        /* 17: pairwise marginals fij */
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++)
                for (int ai = 0; ai < ali->nCodes; ai++)
                    for (int aj = 0; aj < ali->nCodes; aj++) {
                        OUTPUT_PRECISION f =
                            (OUTPUT_PRECISION) fij(i, j, ai, aj);
                        fwrite(&f, sizeof(f), 1, fpOutput);
                    }
        /* 18: couplings eij */
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++)
                for (int ai = 0; ai < ali->nCodes; ai++)
                    for (int aj = 0; aj < ali->nCodes; aj++) {
                        OUTPUT_PRECISION e =
                            (OUTPUT_PRECISION) xEij(i, j, ai, aj);
                        fwrite(&e, sizeof(e), 1, fpOutput);
                    }
        fclose(fpOutput);
    } else {
        fprintf(stderr, "Error writing parameters\n");
        exit(1);
    }
}
#undef OUTPUT_PRECISION
void OutputCouplingScores(char *couplingsFile, const numeric_t *x,
    alignment_t *ali, options_t *options) {
    /* Writes one text line per site pair i < j containing the Frobenius
       norm of the coupling block eij, with the Average Product Correction
       applied unless options->zeroAPC is set. In focus mode the target
       residues and sequence offsets are reported; otherwise 1-based indices
       with '-' placeholders. Exits on failure to open the output file. */
    FILE *fpOutput = NULL;
    fpOutput = fopen(couplingsFile, "w");
    if (fpOutput != NULL) {
        /* Compute the norm of the coupling parameters between each pair */
        numeric_t *couplings =
            (numeric_t *) malloc((ali->nSites * (ali->nSites - 1) / 2)
                * sizeof(numeric_t));
        for (int i = 0; i < ali->nSites * (ali->nSites - 1) / 2;
            i++) couplings[i] = 0;
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++) {
                /* Norm(eij) over ai, aj */
                numeric_t norm = 0.0;
                for (int ai = 0; ai < ali->nCodes; ai++)
                    for (int aj = 0; aj < ali->nCodes; aj++)
                        norm += xEij(i, j, ai, aj) * xEij(i, j, ai, aj);
                norm = sqrt(norm);
                coupling(i, j) = norm;
            }
        numeric_t nPairs =
            ((numeric_t) ((ali->nSites) * (ali->nSites - 1))) / 2.0;
        /* Remove first component of the norms (Average Product Correction) */
        if (!options->zeroAPC) {
            /* Determine the site-wise statistics of the norms */
            numeric_t C_avg = 0.0;
            numeric_t *C_pos_avg =
                (numeric_t *) malloc(ali->nSites * sizeof(numeric_t));
            for (int i = 0; i < ali->nSites; i++) {
                C_pos_avg[i] = 0.0;
            }
            for (int i = 0; i < ali->nSites - 1; i++) {
                for (int j = i + 1; j < ali->nSites; j++) {
                    C_pos_avg[i] +=
                        coupling(i, j) / (numeric_t) (ali->nSites - 1);
                    C_pos_avg[j] +=
                        coupling(i, j) / (numeric_t) (ali->nSites - 1);
                    C_avg += coupling(i, j) / nPairs;
                }
            }
            /* Remove the first component.
               NOTE(review): C_avg == 0 (all-zero couplings) would divide by
               zero here — presumably never the case after inference; confirm */
            for (int i = 0; i < ali->nSites - 1; i++)
                for (int j = i + 1; j < ali->nSites; j++)
                    coupling(i, j) =
                        coupling(i, j) - C_pos_avg[i] * C_pos_avg[j] / C_avg;
            free(C_pos_avg);  /* previously leaked */
        }
        /* Output scores */
        if (ali->target >= 0) {
            /* Focus mode */
            for (int i = 0; i < ali->nSites - 1; i++)
                for (int j = i + 1; j < ali->nSites; j++) {
                    char ai = (char) ali->alphabet[seq(ali->target, i)];
                    char aj = (char) ali->alphabet[seq(ali->target, j)];
                    fprintf(fpOutput, "%d %c %d %c 0 %f\n",
                        ali->offsets[i], ai, ali->offsets[j], aj,
                        coupling(i, j));
                }
        } else {
            for (int i = 0; i < ali->nSites - 1; i++)
                for (int j = i + 1; j < ali->nSites; j++)
                    fprintf(fpOutput, "%d - %d - 0 %f\n", i + 1, j + 1,
                        coupling(i, j));
        }
        free(couplings);  /* previously leaked */
        fclose(fpOutput);
    } else {
        fprintf(stderr, "Error writing coupling scores\n");
        exit(1);
    }
}
|
misc.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
void zero_grid(domain_type * domain, int level, int grid_id){
  // zero's the entire grid INCLUDING ghost zones...
  // Cost is charged to the level's BLAS1 cycle counter.
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // The two if() clauses are complementary: small boxes get one OpenMP
  // thread per box; large boxes share threads across the collapsed k/j loops.
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i < CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int box;
  #pragma omp parallel for private(box) if(omp_across_boxes)
  for(box=0;box<domain->subdomains_per_rank;box++){
    int i,j,k;
    int pencil = domain->subdomains[box].levels[level].pencil;
    int plane = domain->subdomains[box].levels[level].plane;
    int ghosts = domain->subdomains[box].levels[level].ghosts;
    int dim_k = domain->subdomains[box].levels[level].dim.k;
    int dim_j = domain->subdomains[box].levels[level].dim.j;
    int dim_i = domain->subdomains[box].levels[level].dim.i;
    // Offset so grid[0] is the first non-ghost point; the loops below run
    // from -ghosts to dim+ghosts so the ghost shell is swept as well.
    double * __restrict__ grid = domain->subdomains[box].levels[level].grids[grid_id] + ghosts*(1+pencil+plane);
    #pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2)
    for(k=-ghosts;k<dim_k+ghosts;k++){
    for(j=-ghosts;j<dim_j+ghosts;j++){
    for(i=-ghosts;i<dim_i+ghosts;i++){
      int ijk = i + j*pencil + k*plane;
      grid[ijk] = 0.0;
    }}}
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
void initialize_grid_to_scalar(domain_type * domain, int level, int grid_id, double scalar){
  // initializes the grid to a scalar while zero'ing the ghost zones...
  // Cost is charged to the level's BLAS1 cycle counter.
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // Thread either across boxes (small boxes) or within each box (large ones);
  // the two if() clauses are mutually exclusive.
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i < CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int box;
  #pragma omp parallel for private(box) if(omp_across_boxes)
  for(box=0;box<domain->subdomains_per_rank;box++){
    int i,j,k;
    int pencil = domain->subdomains[box].levels[level].pencil;
    int plane = domain->subdomains[box].levels[level].plane;
    int ghosts = domain->subdomains[box].levels[level].ghosts;
    int dim_k = domain->subdomains[box].levels[level].dim.k;
    int dim_j = domain->subdomains[box].levels[level].dim.j;
    int dim_i = domain->subdomains[box].levels[level].dim.i;
    // Offset so grid[0] is the first non-ghost point; loops sweep the ghost
    // shell too, writing 0.0 there and `scalar` in the interior.
    double * __restrict__ grid = domain->subdomains[box].levels[level].grids[grid_id] + ghosts*(1+pencil+plane);
    #pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2)
    for(k=-ghosts;k<dim_k+ghosts;k++){
    for(j=-ghosts;j<dim_j+ghosts;j++){
    for(i=-ghosts;i<dim_i+ghosts;i++){
      int ijk = i + j*pencil + k*plane;
      // any index outside [0,dim) in any dimension lies in the ghost shell
      int ghostZone = (i<0) || (j<0) || (k<0) || (i>=dim_i) || (j>=dim_j) || (k>=dim_k);
      grid[ijk] = ghostZone ? 0.0 : scalar;
    }}}
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
void add_grids(domain_type * domain, int level, int id_c, double scale_a, int id_a, double scale_b, int id_b){ // c=scale_a*id_a + scale_b*id_b
  // Pointwise linear combination c[] = scale_a*a[] + scale_b*b[] over the
  // interior (non-ghost) cells of every box on this rank.
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // Thread across boxes when boxes are small, within a box when they are large.
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i < CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int b;
  #pragma omp parallel for private(b) if(omp_across_boxes)
  for(b=0; b<domain->subdomains_per_rank; b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    // [0] of each pointer is the first non-ghost point of the box
    double * __restrict__ grid_c = domain->subdomains[b].levels[level].grids[id_c] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_a = domain->subdomains[b].levels[level].grids[id_a] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_b = domain->subdomains[b].levels[level].grids[id_b] + ghosts*(1+pencil+plane);
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(omp_within_a_box) collapse(2)
    for(kk=0; kk<dim_k; kk++){
      for(jj=0; jj<dim_j; jj++){
        for(ii=0; ii<dim_i; ii++){
          int ijk = ii + jj*pencil + kk*plane;
          grid_c[ijk] = scale_a*grid_a[ijk] + scale_b*grid_b[ijk];
        }
      }
    }
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
void mul_grids(domain_type * domain, int level, int id_c, double scale, int id_a, int id_b){ // id_c=scale*id_a*id_b
  // Pointwise product c[] = scale * a[] * b[] over the interior (non-ghost)
  // cells of every box on this rank.
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // Thread across boxes when boxes are small, within a box when they are large.
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i < CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int b;
  #pragma omp parallel for private(b) if(omp_across_boxes)
  for(b=0; b<domain->subdomains_per_rank; b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    // [0] of each pointer is the first non-ghost point of the box
    double * __restrict__ grid_c = domain->subdomains[b].levels[level].grids[id_c] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_a = domain->subdomains[b].levels[level].grids[id_a] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_b = domain->subdomains[b].levels[level].grids[id_b] + ghosts*(1+pencil+plane);
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(omp_within_a_box) collapse(2)
    for(kk=0; kk<dim_k; kk++){
      for(jj=0; jj<dim_j; jj++){
        for(ii=0; ii<dim_i; ii++){
          int ijk = ii + jj*pencil + kk*plane;
          grid_c[ijk] = scale*grid_a[ijk]*grid_b[ijk];
        }
      }
    }
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
void scale_grid(domain_type * domain, int level, int id_c, double scale_a, int id_a){ // c[]=scale_a*a[]
  // Pointwise scaling c[] = scale_a * a[] over the interior (non-ghost)
  // cells of every box on this rank.
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // Thread across boxes when boxes are small, within a box when they are large.
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i < CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int b;
  #pragma omp parallel for private(b) if(omp_across_boxes)
  for(b=0; b<domain->subdomains_per_rank; b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    // [0] of each pointer is the first non-ghost point of the box
    double * __restrict__ grid_c = domain->subdomains[b].levels[level].grids[id_c] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_a = domain->subdomains[b].levels[level].grids[id_a] + ghosts*(1+pencil+plane);
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(omp_within_a_box) collapse(2)
    for(kk=0; kk<dim_k; kk++){
      for(jj=0; jj<dim_j; jj++){
        for(ii=0; ii<dim_i; ii++){
          int ijk = ii + jj*pencil + kk*plane;
          grid_c[ijk] = scale_a*grid_a[ijk];
        }
      }
    }
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
double dot(domain_type * domain, int level, int id_a, int id_b){
  // Returns the dot product of grids id_a and id_b over the interior
  // (non-ghost) cells; under __MPI the result is summed across all ranks.
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // Thread across boxes when boxes are small, within a box when they are large.
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i < CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int box;
  double a_dot_b_domain = 0.0;
  // FIX, schedule(static) is a stand in to guarantee reproducibility...
  // (a fixed iteration->thread mapping keeps the FP summation order stable)
  #pragma omp parallel for private(box) if(omp_across_boxes) reduction(+:a_dot_b_domain) schedule(static)
  for(box=0;box<domain->subdomains_per_rank;box++){
    int i,j,k;
    int pencil = domain->subdomains[box].levels[level].pencil;
    int plane = domain->subdomains[box].levels[level].plane;
    int ghosts = domain->subdomains[box].levels[level].ghosts;
    int dim_k = domain->subdomains[box].levels[level].dim.k;
    int dim_j = domain->subdomains[box].levels[level].dim.j;
    int dim_i = domain->subdomains[box].levels[level].dim.i;
    double * __restrict__ grid_a = domain->subdomains[box].levels[level].grids[id_a] + ghosts*(1+pencil+plane); // i.e. [0] = first non ghost zone point
    double * __restrict__ grid_b = domain->subdomains[box].levels[level].grids[id_b] + ghosts*(1+pencil+plane);
    double a_dot_b_box = 0.0;
    #pragma omp parallel for private(i,j,k) if(omp_within_a_box) collapse(2) reduction(+:a_dot_b_box) schedule(static)
    for(k=0;k<dim_k;k++){
    for(j=0;j<dim_j;j++){
    for(i=0;i<dim_i;i++){
      int ijk = i + j*pencil + k*plane;
      a_dot_b_box += grid_a[ijk]*grid_b[ijk];
    }}}
    a_dot_b_domain+=a_dot_b_box;
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
  #ifdef __MPI
  // Global reduction; its time is charged to collectives/communication,
  // not to BLAS1.
  uint64_t _timeStartAllReduce = CycleTime();
  double send = a_dot_b_domain;
  MPI_Allreduce(&send,&a_dot_b_domain,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
  uint64_t _timeEndAllReduce = CycleTime();
  domain->cycles.collectives[level] += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  domain->cycles.communication[level] += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  #endif
  return(a_dot_b_domain);
}
//------------------------------------------------------------------------------------------------------------------------------
double norm(domain_type * domain, int level, int grid_id){ // implements the max norm
  // Returns max |grid[ijk]| over the interior (non-ghost) cells; under
  // __MPI the maximum is taken across all ranks.
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // Thread across boxes when boxes are small, within a box when they are large.
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i < CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int box;
  double max_norm = 0.0;
  // FIX, schedule(static) is a stand in to guarantee reproducibility...
  #pragma omp parallel for private(box) if(omp_across_boxes) reduction(max:max_norm) schedule(static)
  for(box=0;box<domain->subdomains_per_rank;box++){
    int i,j,k;
    int pencil = domain->subdomains[box].levels[level].pencil;
    int plane = domain->subdomains[box].levels[level].plane;
    int ghosts = domain->subdomains[box].levels[level].ghosts;
    int dim_k = domain->subdomains[box].levels[level].dim.k;
    int dim_j = domain->subdomains[box].levels[level].dim.j;
    int dim_i = domain->subdomains[box].levels[level].dim.i;
    double * __restrict__ grid = domain->subdomains[box].levels[level].grids[ grid_id] + ghosts*(1+pencil+plane); // i.e. [0] = first non ghost zone point
    double box_norm = 0.0;
    #pragma omp parallel for private(i,j,k) if(omp_within_a_box) collapse(2) reduction(max:box_norm) schedule(static)
    for(k=0;k<dim_k;k++){
    for(j=0;j<dim_j;j++){
    for(i=0;i<dim_i;i++){
      int ijk = i + j*pencil + k*plane;
      double fabs_grid_ijk = fabs(grid[ijk]);
      if(fabs_grid_ijk>box_norm){box_norm=fabs_grid_ijk;} // max norm
    }}}
    if(box_norm>max_norm){max_norm = box_norm;}
  } // box list
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
  #ifdef __MPI
  // Global max; its time is charged to collectives/communication, not BLAS1.
  uint64_t _timeStartAllReduce = CycleTime();
  double send = max_norm;
  MPI_Allreduce(&send,&max_norm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD);
  uint64_t _timeEndAllReduce = CycleTime();
  domain->cycles.collectives[level] += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  domain->cycles.communication[level] += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  #endif
  return(max_norm);
}
//------------------------------------------------------------------------------------------------------------------------------
double mean(domain_type * domain, int level, int id_a){
  // Returns the mean of grid id_a over the interior cells. The divisor is
  // the GLOBAL cell count (domain->dim), so under __MPI the all-reduced sum
  // divided by it yields the global mean.
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // Thread across boxes when boxes are small, within a box when they are large.
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i < CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int box;
  double sum_domain = 0.0;
  #pragma omp parallel for private(box) if(omp_across_boxes) reduction(+:sum_domain)
  for(box=0;box<domain->subdomains_per_rank;box++){
    int i,j,k;
    int pencil = domain->subdomains[box].levels[level].pencil;
    int plane = domain->subdomains[box].levels[level].plane;
    int ghosts = domain->subdomains[box].levels[level].ghosts;
    int dim_k = domain->subdomains[box].levels[level].dim.k;
    int dim_j = domain->subdomains[box].levels[level].dim.j;
    int dim_i = domain->subdomains[box].levels[level].dim.i;
    double * __restrict__ grid_a = domain->subdomains[box].levels[level].grids[id_a] + ghosts*(1+pencil+plane); // i.e. [0] = first non ghost zone point
    double sum_box = 0.0;
    #pragma omp parallel for private(i,j,k) if(omp_within_a_box) collapse(2) reduction(+:sum_box)
    for(k=0;k<dim_k;k++){
    for(j=0;j<dim_j;j++){
    for(i=0;i<dim_i;i++){
      int ijk = i + j*pencil + k*plane;
      sum_box += grid_a[ijk];
    }}}
    sum_domain+=sum_box;
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
  // NOTE(review): ncells uses domain->dim, i.e. the top-level global grid
  // size regardless of `level` — presumably intentional since the level-0
  // caller dominates; confirm for coarse levels.
  double ncells_domain = (double)domain->dim.i*(double)domain->dim.j*(double)domain->dim.k;
  #ifdef __MPI
  // Global sum; its time is charged to collectives/communication, not BLAS1.
  uint64_t _timeStartAllReduce = CycleTime();
  double send = sum_domain;
  MPI_Allreduce(&send,&sum_domain,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
  uint64_t _timeEndAllReduce = CycleTime();
  domain->cycles.collectives[level] += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  domain->cycles.communication[level] += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  #endif
  double mean_domain = sum_domain / ncells_domain;
  return(mean_domain);
}
void shift_grid(domain_type * domain, int level, int id_c, int id_a, double shift_a){
  // Pointwise shift c[] = a[] + shift_a over the interior (non-ghost) cells
  // of every box on this rank.
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // Thread across boxes when boxes are small, within a box when they are large.
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i < CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int b;
  #pragma omp parallel for private(b) if(omp_across_boxes)
  for(b=0; b<domain->subdomains_per_rank; b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    // [0] of each pointer is the first non-ghost point of the box
    double * __restrict__ grid_c = domain->subdomains[b].levels[level].grids[id_c] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_a = domain->subdomains[b].levels[level].grids[id_a] + ghosts*(1+pencil+plane);
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(omp_within_a_box) collapse(2)
    for(kk=0; kk<dim_k; kk++){
      for(jj=0; jj<dim_j; jj++){
        for(ii=0; ii<dim_i; ii++){
          int ijk = ii + jj*pencil + kk*plane;
          grid_c[ijk] = grid_a[ijk] + shift_a;
        }
      }
    }
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
void project_cell_to_face(domain_type * domain, int level, int id_cell, int id_face, int dir){
  // Interpolate a cell-centered grid onto the faces normal to direction
  // `dir` (0 = i, 1 = j, 2 = k) by averaging the two cells sharing each
  // face. The loops run to <=dim so both the low and high faces are written.
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // Thread across boxes when boxes are small, within a box when they are large.
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i < CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int box;
  #pragma omp parallel for private(box) if(omp_across_boxes)
  for(box=0;box<domain->subdomains_per_rank;box++){
    int i,j,k;
    int pencil = domain->subdomains[box].levels[level].pencil;
    int plane = domain->subdomains[box].levels[level].plane;
    int ghosts = domain->subdomains[box].levels[level].ghosts;
    int dim_k = domain->subdomains[box].levels[level].dim.k;
    int dim_j = domain->subdomains[box].levels[level].dim.j;
    int dim_i = domain->subdomains[box].levels[level].dim.i;
    double * __restrict__ grid_cell = domain->subdomains[box].levels[level].grids[id_cell] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_face = domain->subdomains[box].levels[level].grids[id_face] + ghosts*(1+pencil+plane);
    // Default to the i-direction stride so an out-of-range `dir` can never
    // cause an uninitialized read (previously undefined behavior).
    int stride = 1;
    switch(dir){
      case 0: stride = 1;break;//i-direction
      case 1: stride = pencil;break;//j-direction
      case 2: stride = plane;break;//k-direction
    }
    #pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2)
    for(k=0;k<=dim_k;k++){ // <= to ensure you do low and high faces
    for(j=0;j<=dim_j;j++){
    for(i=0;i<=dim_i;i++){
      int ijk = i + j*pencil + k*plane;
      grid_face[ijk] = 0.5*(grid_cell[ijk-stride] + grid_cell[ijk]); // simple linear interpolation
    }}}
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// Representation of an OpenMP canonical loop.
///
/// OpenMP 1.0 C/C++, section 2.4.1 for Construct; canonical-shape
/// OpenMP 2.0 C/C++, section 2.4.1 for Construct; canonical-shape
/// OpenMP 2.5, section 2.5.1 Loop Construct; canonical form
/// OpenMP 3.1, section 2.5.1 Loop Construct; canonical form
/// OpenMP 4.0, section 2.6 Canonical Loop Form
/// OpenMP 4.5, section 2.6 Canonical Loop Form
/// OpenMP 5.0, section 2.9.1 Canonical Loop Form
/// OpenMP 5.1, section 2.11.1 Canonical Loop Nest Form
///
/// An OpenMP canonical loop is a for-statement or range-based for-statement
/// with additional requirements that ensure that the number of iterations is
/// known before entering the loop and allow skipping to an arbitrary iteration.
/// The OMPCanonicalLoop AST node wraps a ForStmt or CXXForRangeStmt that is
/// known to fulfill OpenMP's canonical loop requirements because of being
/// associated to an OMPLoopBasedDirective. That is, the general structure is:
///
/// OMPLoopBasedDirective
/// [`- CapturedStmt ]
/// [ `- CapturedDecl]
/// ` OMPCanonicalLoop
/// `- ForStmt/CXXForRangeStmt
/// `- Stmt
///
/// One or multiple CapturedStmt/CapturedDecl pairs may be inserted by some
/// directives such as OMPParallelForDirective, but others do not need them
/// (such as OMPTileDirective). The OMPCanonicalLoop and
/// ForStmt/CXXForRangeStmt pair is repeated for each loop associated with the
/// directive. An OMPCanonicalLoop must not appear in the AST unless associated
/// with a OMPLoopBasedDirective. In an imperfectly nested loop nest, the
/// OMPCanonicalLoop may also be wrapped in a CompoundStmt:
///
/// [...]
/// ` OMPCanonicalLoop
/// `- ForStmt/CXXForRangeStmt
/// `- CompoundStmt
/// |- Leading in-between code (if any)
/// |- OMPCanonicalLoop
/// | `- ForStmt/CXXForRangeStmt
/// | `- ...
/// `- Trailing in-between code (if any)
///
/// The leading/trailing in-between code must not itself be a OMPCanonicalLoop
/// to avoid confusion which loop belongs to the nesting.
///
/// There are three different kinds of iteration variables for different
/// purposes:
/// * Loop user variable: The user-accessible variable with different value for
/// each iteration.
/// * Loop iteration variable: The variable used to identify a loop iteration;
/// for range-based for-statement, this is the hidden iterator '__begin'. For
/// other loops, it is identical to the loop user variable. Must be a
/// random-access iterator, pointer or integer type.
/// * Logical iteration counter: Normalized loop counter starting at 0 and
/// incrementing by one at each iteration. Allows abstracting over the type
/// of the loop iteration variable and is always an unsigned integer type
/// appropriate to represent the range of the loop iteration variable. Its
/// value corresponds to the logical iteration number in the OpenMP
/// specification.
///
/// This AST node provides two captured statements:
/// * The distance function which computes the number of iterations.
/// * The loop user variable function that computes the loop user variable when
/// given a logical iteration number.
///
/// These captured statements provide the link between C/C++ semantics and the
/// logical iteration counters used by the OpenMPIRBuilder which is
/// language-agnostic and therefore does not know e.g. how to advance a
/// random-access iterator. The OpenMPIRBuilder will use this information to
/// apply simd, workshare-loop, distribute, taskloop and loop directives to the
/// loop. For compatibility with the non-OpenMPIRBuilder codegen path, an
/// OMPCanonicalLoop can itself also be wrapped into the CapturedStmts of an
/// OMPLoopDirective and skipped when searching for the associated syntactical
/// loop.
///
/// Example:
/// <code>
/// std::vector<std::string> Container{"1", "2", "3"};
/// for (std::string Str : Container)
/// Body(Str);
/// </code>
/// which is syntactic sugar for approximately:
/// <code>
/// auto &&__range = Container;
/// auto __begin = std::begin(__range);
/// auto __end = std::end(__range);
/// for (; __begin != __end; ++__begin) {
/// std::string Str = *__begin;
/// Body(Str);
/// }
/// </code>
/// In this example, the loop user variable is `Str`, the loop iteration
/// variable is `__begin` of type `std::vector<std::string>::iterator` and the
/// logical iteration number type is `size_t` (unsigned version of
/// `std::vector<std::string>::iterator::difference_type` aka `ptrdiff_t`).
/// Therefore, the distance function will be
/// <code>
/// [&](size_t &Result) { Result = __end - __begin; }
/// </code>
/// and the loop variable function is
/// <code>
/// [&,__begin](std::vector<std::string>::iterator &Result, size_t Logical) {
/// Result = __begin + Logical;
/// }
/// </code>
/// The variable `__begin`, aka the loop iteration variable, is captured by
/// value because it is modified in the loop body, but both functions require
/// the initial value. The OpenMP specification explicitly leaves unspecified
/// when the loop expressions are evaluated such that a capture by reference is
/// sufficient.
/// AST node wrapping a ForStmt/CXXForRangeStmt that is known to satisfy
/// OpenMP's canonical-loop requirements, together with the captured helper
/// statements (distance function and loop-variable function) consumed by the
/// OpenMPIRBuilder. See the file comment above for the full semantics.
class OMPCanonicalLoop : public Stmt {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  /// Children of this AST node. These indices are also the (de)serialization
  /// order used by ASTStmtReader/ASTStmtWriter.
  enum {
    LOOP_STMT,     ///< The wrapped ForStmt or CXXForRangeStmt.
    DISTANCE_FUNC, ///< CapturedStmt computing the number of iterations.
    LOOPVAR_FUNC,  ///< CapturedStmt computing the loop user variable.
    LOOPVAR_REF,   ///< DeclRefExpr naming the loop user variable.
    LastSubStmt = LOOPVAR_REF
  };

private:
  /// This AST node's children.
  Stmt *SubStmts[LastSubStmt + 1] = {};

  /// Instances are only created through create()/createEmpty().
  OMPCanonicalLoop() : Stmt(StmtClass::OMPCanonicalLoopClass) {}

public:
  /// Create a new OMPCanonicalLoop.
  static OMPCanonicalLoop *create(const ASTContext &Ctx, Stmt *LoopStmt,
                                  CapturedStmt *DistanceFunc,
                                  CapturedStmt *LoopVarFunc,
                                  DeclRefExpr *LoopVarRef) {
    OMPCanonicalLoop *S = new (Ctx) OMPCanonicalLoop();
    S->setLoopStmt(LoopStmt);
    S->setDistanceFunc(DistanceFunc);
    S->setLoopVarFunc(LoopVarFunc);
    S->setLoopVarRef(LoopVarRef);
    return S;
  }

  /// Create an empty OMPCanonicalLoop for deserialization.
  static OMPCanonicalLoop *createEmpty(const ASTContext &Ctx) {
    return new (Ctx) OMPCanonicalLoop();
  }

  static bool classof(const Stmt *S) {
    return S->getStmtClass() == StmtClass::OMPCanonicalLoopClass;
  }

  /// Source locations are delegated to the wrapped loop statement.
  SourceLocation getBeginLoc() const { return getLoopStmt()->getBeginLoc(); }
  SourceLocation getEndLoc() const { return getLoopStmt()->getEndLoc(); }

  /// Return this AST node's children.
  /// @{
  child_range children() {
    return child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1);
  }
  const_child_range children() const {
    return const_child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1);
  }
  /// @}

  /// The wrapped syntactic loop statement (ForStmt or CXXForRangeStmt).
  /// @{
  Stmt *getLoopStmt() { return SubStmts[LOOP_STMT]; }
  const Stmt *getLoopStmt() const { return SubStmts[LOOP_STMT]; }
  void setLoopStmt(Stmt *S) {
    assert((isa<ForStmt>(S) || isa<CXXForRangeStmt>(S)) &&
           "Canonical loop must be a for loop (range-based or otherwise)");
    SubStmts[LOOP_STMT] = S;
  }
  /// @}

  /// The function that computes the number of loop iterations. Can be evaluated
  /// before entering the loop but after the syntactical loop's init
  /// statement(s).
  ///
  /// Function signature: void(LogicalTy &Result)
  /// Any values necessary to compute the distance are captures of the closure.
  /// @{
  CapturedStmt *getDistanceFunc() {
    return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]);
  }
  const CapturedStmt *getDistanceFunc() const {
    return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]);
  }
  void setDistanceFunc(CapturedStmt *S) {
    assert(S && "Expected non-null captured statement");
    SubStmts[DISTANCE_FUNC] = S;
  }
  /// @}

  /// The function that computes the loop user variable from a logical iteration
  /// counter. Can be evaluated as first statement in the loop.
  ///
  /// Function signature: void(LoopVarTy &Result, LogicalTy Number)
  /// Any other values required to compute the loop user variable (such as start
  /// value, step size) are captured by the closure. In particular, the initial
  /// value of loop iteration variable is captured by value to be unaffected by
  /// previous iterations.
  /// @{
  CapturedStmt *getLoopVarFunc() {
    return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
  }
  const CapturedStmt *getLoopVarFunc() const {
    return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
  }
  void setLoopVarFunc(CapturedStmt *S) {
    assert(S && "Expected non-null captured statement");
    SubStmts[LOOPVAR_FUNC] = S;
  }
  /// @}

  /// Reference to the loop user variable as accessed in the loop body.
  /// @{
  DeclRefExpr *getLoopVarRef() {
    return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
  }
  const DeclRefExpr *getLoopVarRef() const {
    return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
  }
  void setLoopVarRef(DeclRefExpr *E) {
    assert(E && "Expected non-null loop variable");
    SubStmts[LOOPVAR_REF] = E;
  }
  /// @}
};
/// This is a basic class for representing single OpenMP executable
/// directive.
///
/// Base class for all OpenMP executable directives. Stores the directive
/// kind, source range, and (via the trailing OMPChildren object) the clause
/// list, associated statement, and any directive-specific child expressions.
class OMPExecutableDirective : public Stmt {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  /// Kind of the directive.
  OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
  /// Starting location of the directive (directive keyword).
  SourceLocation StartLoc;
  /// Ending location of the directive.
  SourceLocation EndLoc;

  /// Get the clauses storage. Returns an empty array when the directive has
  /// no associated OMPChildren data at all.
  MutableArrayRef<OMPClause *> getClauses() {
    if (!Data)
      return llvm::None;
    return Data->getClauses();
  }

protected:
  /// Data, associated with the directive. Allocated immediately after the
  /// directive object itself (see createDirective); null for directives with
  /// no clauses, no associated statement, and no extra children.
  OMPChildren *Data = nullptr;

  /// Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  ///
  OMPExecutableDirective(StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc)
      : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
        EndLoc(std::move(EndLoc)) {}

  /// Allocate a directive of type T with its OMPChildren payload placed
  /// directly after the T object in a single allocation, then populate the
  /// payload with \p Clauses and \p AssociatedStmt.
  template <typename T, typename... Params>
  static T *createDirective(const ASTContext &C, ArrayRef<OMPClause *> Clauses,
                            Stmt *AssociatedStmt, unsigned NumChildren,
                            Params &&... P) {
    void *Mem =
        C.Allocate(sizeof(T) + OMPChildren::size(Clauses.size(), AssociatedStmt,
                                                 NumChildren),
                   alignof(T));
    auto *Data = OMPChildren::Create(reinterpret_cast<T *>(Mem) + 1, Clauses,
                                     AssociatedStmt, NumChildren);
    auto *Inst = new (Mem) T(std::forward<Params>(P)...);
    Inst->Data = Data;
    return Inst;
  }

  /// Same allocation layout as createDirective, but with an uninitialized
  /// payload, for use during deserialization.
  template <typename T, typename... Params>
  static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
                                 bool HasAssociatedStmt, unsigned NumChildren,
                                 Params &&... P) {
    void *Mem =
        C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
                                                 NumChildren),
                   alignof(T));
    auto *Data =
        OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses,
                                 HasAssociatedStmt, NumChildren);
    auto *Inst = new (Mem) T(std::forward<Params>(P)...);
    Inst->Data = Data;
    return Inst;
  }

  /// Overload for directives whose type T is default-constructible.
  template <typename T>
  static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
                                 bool HasAssociatedStmt = false,
                                 unsigned NumChildren = 0) {
    void *Mem =
        C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
                                                 NumChildren),
                   alignof(T));
    auto *Data =
        OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses,
                                 HasAssociatedStmt, NumChildren);
    auto *Inst = new (Mem) T;
    Inst->Data = Data;
    return Inst;
  }

public:
  /// Iterates over expressions/statements used in the construct.
  class used_clauses_child_iterator
      : public llvm::iterator_adaptor_base<
            used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator,
            std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> {
    ArrayRef<OMPClause *>::iterator End;
    OMPClause::child_iterator ChildI, ChildEnd;

    /// Advance the clause iterator until a clause with at least one used
    /// child is found (or the end is reached).
    void MoveToNext() {
      if (ChildI != ChildEnd)
        return;
      while (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
          if (ChildI != ChildEnd)
            return;
        }
      }
    }

  public:
    explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses)
        : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      if (this->I != End) {
        ChildI = (*this->I)->used_children().begin();
        ChildEnd = (*this->I)->used_children().end();
        MoveToNext();
      }
    }
    Stmt *operator*() const { return *ChildI; }
    Stmt *operator->() const { return **this; }

    used_clauses_child_iterator &operator++() {
      ++ChildI;
      if (ChildI != ChildEnd)
        return *this;
      if (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
        }
      }
      MoveToNext();
      return *this;
    }
  };

  static llvm::iterator_range<used_clauses_child_iterator>
  used_clauses_children(ArrayRef<OMPClause *> Clauses) {
    return {used_clauses_child_iterator(Clauses),
            used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))};
  }

  /// Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only clauses of type SpecificClause.
  template <typename SpecificClause>
  class specific_clause_iterator
      : public llvm::iterator_adaptor_base<
            specific_clause_iterator<SpecificClause>,
            ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
            const SpecificClause *, ptrdiff_t, const SpecificClause *,
            const SpecificClause *> {
    ArrayRef<OMPClause *>::const_iterator End;

    /// Skip clauses that are not of type SpecificClause.
    void SkipToNextClause() {
      while (this->I != End && !isa<SpecificClause>(*this->I))
        ++this->I;
    }

  public:
    explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
        : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      SkipToNextClause();
    }

    const SpecificClause *operator*() const {
      return cast<SpecificClause>(*this->I);
    }
    const SpecificClause *operator->() const { return **this; }

    specific_clause_iterator &operator++() {
      ++this->I;
      SkipToNextClause();
      return *this;
    }
  };

  template <typename SpecificClause>
  static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
    return {specific_clause_iterator<SpecificClause>(Clauses),
            specific_clause_iterator<SpecificClause>(
                llvm::makeArrayRef(Clauses.end(), 0))};
  }

  template <typename SpecificClause>
  llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind() const {
    return getClausesOfKind<SpecificClause>(clauses());
  }

  /// Gets a single clause of the specified kind associated with the
  /// current directive iff there is only one clause of this kind (and assertion
  /// is fired if there is more than one clause is associated with the
  /// directive). Returns nullptr if no clause of this kind is associated with
  /// the directive.
  template <typename SpecificClause>
  static const SpecificClause *getSingleClause(ArrayRef<OMPClause *> Clauses) {
    auto ClausesOfKind = getClausesOfKind<SpecificClause>(Clauses);

    if (ClausesOfKind.begin() != ClausesOfKind.end()) {
      assert(std::next(ClausesOfKind.begin()) == ClausesOfKind.end() &&
             "There are at least 2 clauses of the specified kind");
      return *ClausesOfKind.begin();
    }
    return nullptr;
  }

  template <typename SpecificClause>
  const SpecificClause *getSingleClause() const {
    return getSingleClause<SpecificClause>(clauses());
  }

  /// Returns true if the current directive has one or more clauses of a
  /// specific kind.
  template <typename SpecificClause>
  bool hasClausesOfKind() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    return Clauses.begin() != Clauses.end();
  }

  /// Returns starting location of directive kind.
  SourceLocation getBeginLoc() const { return StartLoc; }
  /// Returns ending location of directive.
  SourceLocation getEndLoc() const { return EndLoc; }

  /// Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// Get number of clauses.
  unsigned getNumClauses() const {
    if (!Data)
      return 0;
    return Data->getNumClauses();
  }

  /// Returns specified clause.
  ///
  /// \param I Number of clause.
  ///
  OMPClause *getClause(unsigned I) const { return clauses()[I]; }

  /// Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return Data && Data->hasAssociatedStmt(); }

  /// Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)->getAssociatedStmt();
  }
  Stmt *getAssociatedStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    return Data->getAssociatedStmt();
  }

  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    return Data->getCapturedStmt(RegionKind, CaptureRegions);
  }

  /// Get innermost captured statement for the construct.
  CapturedStmt *getInnermostCapturedStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    return Data->getInnermostCapturedStmt(CaptureRegions);
  }

  const CapturedStmt *getInnermostCapturedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)
        ->getInnermostCapturedStmt();
  }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }

  child_range children() {
    if (!Data)
      return child_range(child_iterator(), child_iterator());
    return Data->getAssociatedStmtAsRange();
  }

  const_child_range children() const {
    return const_cast<OMPExecutableDirective *>(this)->children();
  }

  /// Returns the clause list, or an empty array if there is no payload.
  ArrayRef<OMPClause *> clauses() const {
    if (!Data)
      return llvm::None;
    return Data->getClauses();
  }

  /// Returns whether or not this is a Standalone directive.
  ///
  /// Stand-alone directives are executable directives
  /// that have no associated user code.
  bool isStandaloneDirective() const;

  /// Returns the AST node representing OpenMP structured-block of this
  /// OpenMP executable directive,
  /// Prerequisite: Executable Directive must not be Standalone directive.
  const Stmt *getStructuredBlock() const {
    return const_cast<OMPExecutableDirective *>(this)->getStructuredBlock();
  }
  Stmt *getStructuredBlock();

  const Stmt *getRawStmt() const {
    return const_cast<OMPExecutableDirective *>(this)->getRawStmt();
  }
  Stmt *getRawStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    return Data->getRawStmt();
  }
};
/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
/// AST node for the '#pragma omp parallel' directive. See the example in the
/// comment above; carries a task-reduction descriptor expression (stored as
/// child 0 of the OMPChildren payload) and an inner-cancel flag.
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelDirective()
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, SourceLocation(),
                               SourceLocation()) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelDirective *>(this)->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};
/// The base class for all loop-based directives, including loop transformation
/// directives.
/// Common base for loop-associated OpenMP directives (worksharing loops,
/// simd, distribute, taskloop, and loop-transformation directives). Stores
/// the number of associated (collapsed) loops and the helper-expression
/// bundles built by Sema for loop code generation.
class OMPLoopBasedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

protected:
  /// Number of collapsed loops as specified by 'collapse' clause.
  unsigned NumAssociatedLoops = 0;

  /// Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
  /// \param Kind Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param NumAssociatedLoops Number of loops associated with the construct.
  ///
  OMPLoopBasedDirective(StmtClass SC, OpenMPDirectiveKind Kind,
                        SourceLocation StartLoc, SourceLocation EndLoc,
                        unsigned NumAssociatedLoops)
      : OMPExecutableDirective(SC, Kind, StartLoc, EndLoc),
        NumAssociatedLoops(NumAssociatedLoops) {}

public:
  /// The expressions built to support OpenMP loops in combined/composite
  /// pragmas (e.g. pragma omp distribute parallel for)
  struct DistCombinedHelperExprs {
    /// DistributeLowerBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *LB;
    /// DistributeUpperBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *UB;
    /// DistributeEnsureUpperBound - used when composing 'omp distribute'
    ///  with 'omp for' in a same construct, EUB depends on DistUB
    Expr *EUB;
    /// Distribute loop iteration variable init used when composing 'omp
    /// distribute'
    ///  with 'omp for' in a same construct
    Expr *Init;
    /// Distribute Loop condition used when composing 'omp distribute'
    ///  with 'omp for' in a same construct
    Expr *Cond;
    /// Update of LowerBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NLB;
    /// Update of UpperBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NUB;
    /// Distribute Loop condition used when composing 'omp distribute'
    ///  with 'omp for' in a same construct when schedule is chunked.
    Expr *DistCond;
    /// 'omp parallel for' loop condition used when composed with
    /// 'omp distribute' in the same construct and when schedule is
    /// chunked and the chunk size is 1.
    Expr *ParForInDistCond;
  };

  /// The expressions built for the OpenMP loop CodeGen for the
  /// whole collapsed loop nest.
  struct HelperExprs {
    /// Loop iteration variable.
    Expr *IterationVarRef;
    /// Loop last iteration number.
    Expr *LastIteration;
    /// Loop number of iterations.
    Expr *NumIterations;
    /// Calculation of last iteration.
    Expr *CalcLastIteration;
    /// Loop pre-condition.
    Expr *PreCond;
    /// Loop condition.
    Expr *Cond;
    /// Loop iteration variable init.
    Expr *Init;
    /// Loop increment.
    Expr *Inc;
    /// IsLastIteration - local flag variable passed to runtime.
    Expr *IL;
    /// LowerBound - local variable passed to runtime.
    Expr *LB;
    /// UpperBound - local variable passed to runtime.
    Expr *UB;
    /// Stride - local variable passed to runtime.
    Expr *ST;
    /// EnsureUpperBound -- expression UB = min(UB, NumIterations).
    Expr *EUB;
    /// Update of LowerBound for statically scheduled 'omp for' loops.
    Expr *NLB;
    /// Update of UpperBound for statically scheduled 'omp for' loops.
    Expr *NUB;
    /// PreviousLowerBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevLB;
    /// PreviousUpperBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevUB;
    /// DistInc - increment expression for distribute loop when found
    /// combined with a further loop level (e.g. in 'distribute parallel for')
    /// expression IV = IV + ST
    Expr *DistInc;
    /// PrevEUB - expression similar to EUB but to be used when loop
    /// scheduling uses PrevLB and PrevUB (e.g.  in 'distribute parallel for'
    /// when ensuring that the UB is either the calculated UB by the runtime or
    /// the end of the assigned distribute chunk)
    /// expression UB = min (UB, PrevUB)
    Expr *PrevEUB;
    /// Counters Loop counters.
    SmallVector<Expr *, 4> Counters;
    /// PrivateCounters Loop counters.
    SmallVector<Expr *, 4> PrivateCounters;
    /// Expressions for loop counters inits for CodeGen.
    SmallVector<Expr *, 4> Inits;
    /// Expressions for loop counters update for CodeGen.
    SmallVector<Expr *, 4> Updates;
    /// Final loop counter values for CodeGen.
    SmallVector<Expr *, 4> Finals;
    /// List of counters required for the generation of the non-rectangular
    /// loops.
    SmallVector<Expr *, 4> DependentCounters;
    /// List of initializers required for the generation of the non-rectangular
    /// loops.
    SmallVector<Expr *, 4> DependentInits;
    /// List of final conditions required for the generation of the
    /// non-rectangular loops.
    SmallVector<Expr *, 4> FinalsConditions;
    /// Init statement for all captured expressions.
    Stmt *PreInits;

    /// Expressions used when combining OpenMP loop pragmas
    DistCombinedHelperExprs DistCombinedFields;

    /// Check if all the expressions are built (does not check the
    /// worksharing ones).
    bool builtAll() {
      return IterationVarRef != nullptr && LastIteration != nullptr &&
             NumIterations != nullptr && PreCond != nullptr &&
             Cond != nullptr && Init != nullptr && Inc != nullptr;
    }

    /// Initialize all the fields to null.
    /// \param Size Number of elements in the
    /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions
    /// arrays.
    void clear(unsigned Size) {
      IterationVarRef = nullptr;
      LastIteration = nullptr;
      CalcLastIteration = nullptr;
      PreCond = nullptr;
      Cond = nullptr;
      Init = nullptr;
      Inc = nullptr;
      IL = nullptr;
      LB = nullptr;
      UB = nullptr;
      ST = nullptr;
      EUB = nullptr;
      NLB = nullptr;
      NUB = nullptr;
      NumIterations = nullptr;
      PrevLB = nullptr;
      PrevUB = nullptr;
      DistInc = nullptr;
      PrevEUB = nullptr;
      // Resize the per-loop arrays to hold one entry per associated loop,
      // then null every slot (resize alone keeps old values on shrink).
      Counters.resize(Size);
      PrivateCounters.resize(Size);
      Inits.resize(Size);
      Updates.resize(Size);
      Finals.resize(Size);
      DependentCounters.resize(Size);
      DependentInits.resize(Size);
      FinalsConditions.resize(Size);
      for (unsigned I = 0; I < Size; ++I) {
        Counters[I] = nullptr;
        PrivateCounters[I] = nullptr;
        Inits[I] = nullptr;
        Updates[I] = nullptr;
        Finals[I] = nullptr;
        DependentCounters[I] = nullptr;
        DependentInits[I] = nullptr;
        FinalsConditions[I] = nullptr;
      }
      PreInits = nullptr;
      DistCombinedFields.LB = nullptr;
      DistCombinedFields.UB = nullptr;
      DistCombinedFields.EUB = nullptr;
      DistCombinedFields.Init = nullptr;
      DistCombinedFields.Cond = nullptr;
      DistCombinedFields.NLB = nullptr;
      DistCombinedFields.NUB = nullptr;
      DistCombinedFields.DistCond = nullptr;
      DistCombinedFields.ParForInDistCond = nullptr;
    }
  };

  /// Get number of collapsed loops.
  unsigned getLoopsNumber() const { return NumAssociatedLoops; }

  /// Try to find the next loop sub-statement in the specified statement \p
  /// CurStmt.
  /// \param TryImperfectlyNestedLoops true, if we need to try to look for the
  /// imperfectly nested loop.
  static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt,
                                      bool TryImperfectlyNestedLoops);
  static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt,
                                            bool TryImperfectlyNestedLoops) {
    return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt),
                                  TryImperfectlyNestedLoops);
  }

  /// Calls the specified callback function for all the loops in \p CurStmt,
  /// from the outermost to the innermost.
  static bool doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                            unsigned NumLoops,
                            llvm::function_ref<bool(unsigned, Stmt *)> Callback,
                            llvm::function_ref<void(OMPLoopBasedDirective *)>
                                OnTransformationCallback);
  static bool
  doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                unsigned NumLoops,
                llvm::function_ref<bool(unsigned, const Stmt *)> Callback,
                llvm::function_ref<void(const OMPLoopBasedDirective *)>
                    OnTransformationCallback) {
    // Adapt the const callbacks to the non-const worker overload.
    auto &&NewCallback = [Callback](unsigned Cnt, Stmt *CurStmt) {
      return Callback(Cnt, CurStmt);
    };
    auto &&NewTransformCb =
        [OnTransformationCallback](OMPLoopBasedDirective *A) {
          OnTransformationCallback(A);
        };
    return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                         NumLoops, NewCallback, NewTransformCb);
  }

  /// Calls the specified callback function for all the loops in \p CurStmt,
  /// from the outermost to the innermost.
  static bool
  doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                unsigned NumLoops,
                llvm::function_ref<bool(unsigned, Stmt *)> Callback) {
    // No-op transformation callback for callers that do not care.
    auto &&TransformCb = [](OMPLoopBasedDirective *) {};
    return doForAllLoops(CurStmt, TryImperfectlyNestedLoops, NumLoops, Callback,
                         TransformCb);
  }
  static bool
  doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                unsigned NumLoops,
                llvm::function_ref<bool(unsigned, const Stmt *)> Callback) {
    auto &&NewCallback = [Callback](unsigned Cnt, const Stmt *CurStmt) {
      return Callback(Cnt, CurStmt);
    };
    return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                         NumLoops, NewCallback);
  }

  /// Calls the specified callback function for all the loop bodies in \p
  /// CurStmt, from the outermost loop to the innermost.
  static void doForAllLoopsBodies(
      Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
      llvm::function_ref<void(unsigned, Stmt *, Stmt *)> Callback);
  static void doForAllLoopsBodies(
      const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
      llvm::function_ref<void(unsigned, const Stmt *, const Stmt *)> Callback) {
    auto &&NewCallback = [Callback](unsigned Cnt, Stmt *Loop, Stmt *Body) {
      Callback(Cnt, Loop, Body);
    };
    doForAllLoopsBodies(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                        NumLoops, NewCallback);
  }

  static bool classof(const Stmt *T) {
    if (auto *D = dyn_cast<OMPExecutableDirective>(T))
      return isOpenMPLoopDirective(D->getDirectiveKind());
    return false;
  }
};
/// This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPLoopBasedDirective {
friend class ASTStmtReader;
/// Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
/// expressions stored in OMPLoopDirective.
/// The first 8 children (offsets 0-7) are necessary for all the loop
/// directives, the next 8 (offsets 8-15) are specific to the worksharing
/// ones, and the next 13 (offsets 16-28) are used for combined constructs
/// containing two pragmas associated to loops.
/// After the fixed children, eight arrays of length CollapsedNum are
/// allocated: counters, private counters, inits, updates, finals, dependent
/// counters, dependent inits and finals conditions (see numLoopChildren()).
/// PrevLowerBound and PrevUpperBound are used to communicate blocking
/// information in composite constructs which require loop blocking
/// DistInc is used to generate the increment expression for the distribute
/// loop when combined with a further nested loop
/// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
/// for loop when combined with a previous distribute loop in the same pragma
/// (e.g. 'distribute parallel for')
///
enum {
IterationVariableOffset = 0,
LastIterationOffset = 1,
CalcLastIterationOffset = 2,
PreConditionOffset = 3,
CondOffset = 4,
InitOffset = 5,
IncOffset = 6,
PreInitsOffset = 7,
// The '...End' enumerators do not correspond to child expressions - they
// specify the offset to the end (and start of the following counters/
// updates/finals/dependent_counters/dependent_inits/finals_conditions
// arrays).
DefaultEnd = 8,
// The following 8 exprs are used by worksharing and distribute loops only.
IsLastIterVariableOffset = 8,
LowerBoundVariableOffset = 9,
UpperBoundVariableOffset = 10,
StrideVariableOffset = 11,
EnsureUpperBoundOffset = 12,
NextLowerBoundOffset = 13,
NextUpperBoundOffset = 14,
NumIterationsOffset = 15,
// Offset to the end for worksharing loop directives.
WorksharingEnd = 16,
PrevLowerBoundVariableOffset = 16,
PrevUpperBoundVariableOffset = 17,
DistIncOffset = 18,
PrevEnsureUpperBoundOffset = 19,
CombinedLowerBoundVariableOffset = 20,
CombinedUpperBoundVariableOffset = 21,
CombinedEnsureUpperBoundOffset = 22,
CombinedInitOffset = 23,
CombinedConditionOffset = 24,
CombinedNextLowerBoundOffset = 25,
CombinedNextUpperBoundOffset = 26,
CombinedDistConditionOffset = 27,
CombinedParForInDistConditionOffset = 28,
// Offset to the end (and start of the following
// counters/updates/finals/dependent_counters/dependent_inits/finals_conditions
// arrays) for combined distribute loop directives.
CombinedDistributeEnd = 29,
};
/// Get the counters storage.
MutableArrayRef<Expr *> getCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind())]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the private counters storage.
MutableArrayRef<Expr *> getPrivateCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the initializations storage.
MutableArrayRef<Expr *> getInits() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
2 * getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the updates storage.
MutableArrayRef<Expr *> getUpdates() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
3 * getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
4 * getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the dependent counters storage.
MutableArrayRef<Expr *> getDependentCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
5 * getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the dependent inits storage.
MutableArrayRef<Expr *> getDependentInits() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
6 * getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the finals conditions storage.
MutableArrayRef<Expr *> getFinalsConditions() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
7 * getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
protected:
/// Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
///
OMPLoopDirective(StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, CollapsedNum) {}
/// Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
if (isOpenMPLoopBoundSharingDirective(Kind))
return CombinedDistributeEnd;
if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
isOpenMPDistributeDirective(Kind))
return WorksharingEnd;
return DefaultEnd;
}
/// Children number.
static unsigned numLoopChildren(unsigned CollapsedNum,
OpenMPDirectiveKind Kind) {
return getArraysOffset(Kind) +
8 * CollapsedNum; // Counters, PrivateCounters, Inits,
// Updates, Finals, DependentCounters,
// DependentInits, FinalsConditions.
}
// The setters below store helper expressions into the fixed child slots
// declared in the offset enum above. NOTE(review): ASTStmtReader is declared
// a friend above, presumably so deserialization can call these protected
// setters — confirm against the reader implementation.
void setIterationVariable(Expr *IV) {
Data->getChildren()[IterationVariableOffset] = IV;
}
void setLastIteration(Expr *LI) {
Data->getChildren()[LastIterationOffset] = LI;
}
void setCalcLastIteration(Expr *CLI) {
Data->getChildren()[CalcLastIterationOffset] = CLI;
}
void setPreCond(Expr *PC) { Data->getChildren()[PreConditionOffset] = PC; }
void setCond(Expr *Cond) { Data->getChildren()[CondOffset] = Cond; }
void setInit(Expr *Init) { Data->getChildren()[InitOffset] = Init; }
void setInc(Expr *Inc) { Data->getChildren()[IncOffset] = Inc; }
void setPreInits(Stmt *PreInits) {
Data->getChildren()[PreInitsOffset] = PreInits;
}
// The next group of setters is only valid for worksharing/taskloop/distribute
// directives (the asserted predicate), since those child slots only exist
// past DefaultEnd for such kinds — see getArraysOffset().
void setIsLastIterVariable(Expr *IL) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[IsLastIterVariableOffset] = IL;
}
void setLowerBoundVariable(Expr *LB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[LowerBoundVariableOffset] = LB;
}
void setUpperBoundVariable(Expr *UB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[UpperBoundVariableOffset] = UB;
}
void setStrideVariable(Expr *ST) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[StrideVariableOffset] = ST;
}
void setEnsureUpperBound(Expr *EUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[EnsureUpperBoundOffset] = EUB;
}
void setNextLowerBound(Expr *NLB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[NextLowerBoundOffset] = NLB;
}
void setNextUpperBound(Expr *NUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[NextUpperBoundOffset] = NUB;
}
void setNumIterations(Expr *NI) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[NumIterationsOffset] = NI;
}
// The remaining setters are only valid for loop-bound-sharing (combined
// distribute) directives, whose child slots extend to CombinedDistributeEnd.
void setPrevLowerBoundVariable(Expr *PrevLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[PrevLowerBoundVariableOffset] = PrevLB;
}
void setPrevUpperBoundVariable(Expr *PrevUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[PrevUpperBoundVariableOffset] = PrevUB;
}
void setDistInc(Expr *DistInc) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[DistIncOffset] = DistInc;
}
void setPrevEnsureUpperBound(Expr *PrevEUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[PrevEnsureUpperBoundOffset] = PrevEUB;
}
void setCombinedLowerBoundVariable(Expr *CombLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedLowerBoundVariableOffset] = CombLB;
}
void setCombinedUpperBoundVariable(Expr *CombUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedUpperBoundVariableOffset] = CombUB;
}
void setCombinedEnsureUpperBound(Expr *CombEUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedEnsureUpperBoundOffset] = CombEUB;
}
void setCombinedInit(Expr *CombInit) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedInitOffset] = CombInit;
}
void setCombinedCond(Expr *CombCond) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedConditionOffset] = CombCond;
}
void setCombinedNextLowerBound(Expr *CombNLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedNextLowerBoundOffset] = CombNLB;
}
void setCombinedNextUpperBound(Expr *CombNUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedNextUpperBoundOffset] = CombNUB;
}
void setCombinedDistCond(Expr *CombDistCond) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound distribute sharing directive");
Data->getChildren()[CombinedDistConditionOffset] = CombDistCond;
}
void setCombinedParForInDistCond(Expr *CombParForInDistCond) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound distribute sharing directive");
Data->getChildren()[CombinedParForInDistConditionOffset] =
CombParForInDistCond;
}
// Bulk setters for the per-loop trailing arrays (one entry per collapsed
// loop); defined out of line.
void setCounters(ArrayRef<Expr *> A);
void setPrivateCounters(ArrayRef<Expr *> A);
void setInits(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
void setDependentCounters(ArrayRef<Expr *> A);
void setDependentInits(ArrayRef<Expr *> A);
void setFinalsConditions(ArrayRef<Expr *> A);
public:
// Read-only accessors for the fixed helper expressions. Each simply casts
// the child stored in the corresponding offset slot; the asserted predicates
// mirror the setters above.
Expr *getIterationVariable() const {
return cast<Expr>(Data->getChildren()[IterationVariableOffset]);
}
Expr *getLastIteration() const {
return cast<Expr>(Data->getChildren()[LastIterationOffset]);
}
Expr *getCalcLastIteration() const {
return cast<Expr>(Data->getChildren()[CalcLastIterationOffset]);
}
Expr *getPreCond() const {
return cast<Expr>(Data->getChildren()[PreConditionOffset]);
}
Expr *getCond() const { return cast<Expr>(Data->getChildren()[CondOffset]); }
Expr *getInit() const { return cast<Expr>(Data->getChildren()[InitOffset]); }
Expr *getInc() const { return cast<Expr>(Data->getChildren()[IncOffset]); }
const Stmt *getPreInits() const {
return Data->getChildren()[PreInitsOffset];
}
Stmt *getPreInits() { return Data->getChildren()[PreInitsOffset]; }
Expr *getIsLastIterVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[IsLastIterVariableOffset]);
}
Expr *getLowerBoundVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[LowerBoundVariableOffset]);
}
Expr *getUpperBoundVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[UpperBoundVariableOffset]);
}
Expr *getStrideVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[StrideVariableOffset]);
}
Expr *getEnsureUpperBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[EnsureUpperBoundOffset]);
}
Expr *getNextLowerBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[NextLowerBoundOffset]);
}
Expr *getNextUpperBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[NextUpperBoundOffset]);
}
Expr *getNumIterations() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[NumIterationsOffset]);
}
Expr *getPrevLowerBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[PrevLowerBoundVariableOffset]);
}
Expr *getPrevUpperBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[PrevUpperBoundVariableOffset]);
}
Expr *getDistInc() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[DistIncOffset]);
}
Expr *getPrevEnsureUpperBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[PrevEnsureUpperBoundOffset]);
}
Expr *getCombinedLowerBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedLowerBoundVariableOffset]);
}
Expr *getCombinedUpperBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedUpperBoundVariableOffset]);
}
Expr *getCombinedEnsureUpperBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedEnsureUpperBoundOffset]);
}
Expr *getCombinedInit() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedInitOffset]);
}
Expr *getCombinedCond() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedConditionOffset]);
}
Expr *getCombinedNextLowerBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedNextLowerBoundOffset]);
}
Expr *getCombinedNextUpperBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedNextUpperBoundOffset]);
}
Expr *getCombinedDistCond() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound distribute sharing directive");
return cast<Expr>(Data->getChildren()[CombinedDistConditionOffset]);
}
Expr *getCombinedParForInDistCond() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound distribute sharing directive");
return cast<Expr>(Data->getChildren()[CombinedParForInDistConditionOffset]);
}
/// The statement body of the associated loop(s); defined out of line.
Stmt *getBody();
const Stmt *getBody() const {
return const_cast<OMPLoopDirective *>(this)->getBody();
}
// Array accessors: expose the per-loop trailing expression arrays. The const
// overloads reuse the private mutable getters via const_cast, which is safe
// because the result is returned as an immutable ArrayRef.
ArrayRef<Expr *> counters() { return getCounters(); }
ArrayRef<Expr *> counters() const {
return const_cast<OMPLoopDirective *>(this)->getCounters();
}
ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
ArrayRef<Expr *> private_counters() const {
return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
}
ArrayRef<Expr *> inits() { return getInits(); }
ArrayRef<Expr *> inits() const {
return const_cast<OMPLoopDirective *>(this)->getInits();
}
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
return const_cast<OMPLoopDirective *>(this)->getUpdates();
}
ArrayRef<Expr *> finals() { return getFinals(); }
ArrayRef<Expr *> finals() const {
return const_cast<OMPLoopDirective *>(this)->getFinals();
}
ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); }
ArrayRef<Expr *> dependent_counters() const {
return const_cast<OMPLoopDirective *>(this)->getDependentCounters();
}
ArrayRef<Expr *> dependent_inits() { return getDependentInits(); }
ArrayRef<Expr *> dependent_inits() const {
return const_cast<OMPLoopDirective *>(this)->getDependentInits();
}
ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); }
ArrayRef<Expr *> finals_conditions() const {
return const_cast<OMPLoopDirective *>(this)->getFinalsConditions();
}
/// Support for LLVM-style RTTI (isa/dyn_cast): enumerates every concrete
/// statement class derived from OMPLoopDirective.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass ||
T->getStmtClass() == OMPForDirectiveClass ||
T->getStmtClass() == OMPForSimdDirectiveClass ||
T->getStmtClass() == OMPParallelForDirectiveClass ||
T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTaskLoopDirectiveClass ||
T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPMasterTaskLoopDirectiveClass ||
T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPDistributeDirectiveClass ||
T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTargetSimdDirectiveClass ||
T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
T->getStmtClass() ==
OMPTeamsDistributeParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForDirectiveClass ||
T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
}
};
/// This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt,
const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass;
}
};
/// This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if current directive has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPForDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for,
SourceLocation(), SourceLocation(), CollapsedNum) {}
/// Sets special task reduction descriptor.
/// The descriptor occupies the first child slot past the loop children
/// (index numLoopChildren(getLoopsNumber(), OMPD_for)).
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(getLoopsNumber(),
llvm::omp::OMPD_for)] = E;
}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs,
Expr *TaskRedRef, bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
/// Returns special task reduction reference expression.
/// May be null (cast_or_null) when no task reduction descriptor was set.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
getLoopsNumber(), llvm::omp::OMPD_for)]);
}
/// Const overload of getTaskReductionRefExpr().
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPForDirective *>(this)->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForDirectiveClass;
}
};
/// This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd,
StartLoc, EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPForSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForSimdDirectiveClass;
}
};
/// This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if current directive has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPSectionsDirectiveClass,
llvm::omp::OMPD_sections, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPSectionsDirective()
: OMPExecutableDirective(OMPSectionsDirectiveClass,
llvm::omp::OMPD_sections, SourceLocation(),
SourceLocation()) {}
/// Sets special task reduction descriptor.
/// Stored in child slot 0 (this directive has no loop children).
void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if current directive has inner directive.
///
static OMPSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Returns special task reduction reference expression.
/// May be null (cast_or_null) when no task reduction descriptor was set.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[0]);
}
/// Const overload of getTaskReductionRefExpr().
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPSectionsDirective *>(this)->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionsDirectiveClass;
}
};
/// This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if current directive has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPSectionDirectiveClass,
llvm::omp::OMPD_section, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPSectionDirective()
: OMPExecutableDirective(OMPSectionDirectiveClass,
llvm::omp::OMPD_section, SourceLocation(),
SourceLocation()) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param HasCancel true if current directive has inner directive.
///
static OMPSectionDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt, bool HasCancel);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionDirectiveClass;
}
};
/// This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPSingleDirective()
: OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
SourceLocation(), SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPSingleDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSingleDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSingleDirectiveClass;
}
};
/// This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPMasterDirective()
      : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMasterDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterDirectiveClass;
  }
};
/// This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Name of the directive.
  DeclarationNameInfo DirName;

  /// Build directive with the given start and end location.
  ///
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
                       SourceLocation EndLoc)
      : OMPExecutableDirective(OMPCriticalDirectiveClass,
                               llvm::omp::OMPD_critical, StartLoc, EndLoc),
        DirName(Name) {}

  /// Build an empty directive.
  ///
  explicit OMPCriticalDirective()
      : OMPExecutableDirective(OMPCriticalDirectiveClass,
                               llvm::omp::OMPD_critical, SourceLocation(),
                               SourceLocation()) {}

  /// Set name of the directive.
  ///
  /// \param Name Name of the directive.
  ///
  void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPCriticalDirective *
  Create(const ASTContext &C, const DeclarationNameInfo &Name,
         SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCriticalDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Return name of the directive.
  ///
  DeclarationNameInfo getDirectiveName() const { return DirName; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCriticalDirectiveClass;
  }
};
/// This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current region has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForDirectiveClass,
                         llvm::omp::OMPD_parallel_for, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForDirectiveClass,
                         llvm::omp::OMPD_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  ///
  /// The descriptor is stored in the child slot immediately after the loop
  /// helper expressions (hence the numLoopChildren() offset).
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(getLoopsNumber(),
                                        llvm::omp::OMPD_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_parallel_for)]);
  }
  /// Returns special task reduction reference expression (const overload).
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp parallel master' directive.
///
/// \code
/// #pragma omp parallel master private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel master' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPParallelMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelMasterDirectiveClass,
                               llvm::omp::OMPD_parallel_master, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelMasterDirective()
      : OMPExecutableDirective(OMPParallelMasterDirectiveClass,
                               llvm::omp::OMPD_parallel_master,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  ///
  static OMPParallelMasterDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  /// Returns special task reduction reference expression (const overload).
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelMasterDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterDirectiveClass;
  }
};
/// This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelSectionsDirective()
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  /// Returns special task reduction reference expression (const overload).
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelSectionsDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};
/// This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if this directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskDirective()
      : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
                               SourceLocation(), SourceLocation()) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true, if current directive has inner cancel directive.
  ///
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};
/// This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskyieldDirectiveClass,
                               llvm::omp::OMPD_taskyield, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskyieldDirective()
      : OMPExecutableDirective(OMPTaskyieldDirectiveClass,
                               llvm::omp::OMPD_taskyield, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPTaskyieldDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskyieldDirectiveClass;
  }
};
/// This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPBarrierDirectiveClass,
                               llvm::omp::OMPD_barrier, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPBarrierDirective()
      : OMPExecutableDirective(OMPBarrierDirectiveClass,
                               llvm::omp::OMPD_barrier, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPBarrierDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPBarrierDirectiveClass;
  }
};
/// This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskwaitDirectiveClass,
                               llvm::omp::OMPD_taskwait, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskwaitDirective()
      : OMPExecutableDirective(OMPTaskwaitDirectiveClass,
                               llvm::omp::OMPD_taskwait, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPTaskwaitDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskwaitDirectiveClass;
  }
};
/// This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
class OMPTaskgroupDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskgroupDirectiveClass,
                               llvm::omp::OMPD_taskgroup, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskgroupDirective()
      : OMPExecutableDirective(OMPTaskgroupDirectiveClass,
                               llvm::omp::OMPD_taskgroup, SourceLocation(),
                               SourceLocation()) {}

  /// Sets the task_reduction return variable.
  void setReductionRef(Expr *RR) { Data->getChildren()[0] = RR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param ReductionRef Reference to the task_reduction return variable.
  ///
  static OMPTaskgroupDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Expr *ReductionRef);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C,
                                            unsigned NumClauses, EmptyShell);

  /// Returns reference to the task_reduction return variable.
  const Expr *getReductionRef() const {
    return const_cast<OMPTaskgroupDirective *>(this)->getReductionRef();
  }
  /// Returns reference to the task_reduction return variable.
  Expr *getReductionRef() { return cast_or_null<Expr>(Data->getChildren()[0]); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskgroupDirectiveClass;
  }
};
/// This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments- variables 'a'
/// and 'b'.
/// 'omp flush' directive does not have clauses but have an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPFlushDirective()
      : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};
/// This represents '#pragma omp depobj' directive.
///
/// \code
/// #pragma omp depobj(a) depend(in:x,y)
/// \endcode
/// In this example directive '#pragma omp depobj' initializes a depobj object
/// 'a' with dependence type 'in' and a list with 'x' and 'y' locators.
class OMPDepobjDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPDepobjDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPDepobjDirective()
      : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPDepobjDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDepobjDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDepobjDirectiveClass;
  }
};
/// This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPOrderedDirectiveClass,
                               llvm::omp::OMPD_ordered, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPOrderedDirective()
      : OMPExecutableDirective(OMPOrderedDirectiveClass,
                               llvm::omp::OMPD_ordered, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param IsStandalone true, if the standalone directive is created.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          bool IsStandalone, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};
/// This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Used for 'atomic update' or 'atomic capture' constructs. They may
/// have atomic expressions of forms
/// \code
/// x = x binop expr;
/// x = expr binop x;
/// \endcode
/// This field is true for the first form of the expression and false for the
/// second. Required for correct codegen of non-associative operations (like
/// << or >>).
bool IsXLHSInRHSPart = false;
/// Used for 'atomic update' or 'atomic capture' constructs. They may
/// have atomic expressions of forms
/// \code
/// v = x; <update x>;
/// <update x>; v = x;
/// \endcode
/// This field is true for the first(postfix) form of the expression and false
/// otherwise.
bool IsPostfixUpdate = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPAtomicDirective()
: OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic,
SourceLocation(), SourceLocation()) {}
enum DataPositionTy : size_t {
POS_X = 0,
POS_V,
POS_E,
POS_UpdateExpr,
};
/// Set 'x' part of the associated expression/statement.
void setX(Expr *X) { Data->getChildren()[DataPositionTy::POS_X] = X; }
/// Set helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
void setUpdateExpr(Expr *UE) {
Data->getChildren()[DataPositionTy::POS_UpdateExpr] = UE;
}
/// Set 'v' part of the associated expression/statement.
void setV(Expr *V) { Data->getChildren()[DataPositionTy::POS_V] = V; }
/// Set 'expr' part of the associated expression/statement.
void setExpr(Expr *E) { Data->getChildren()[DataPositionTy::POS_E] = E; }
public:
/// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
/// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
/// detailed description of 'x', 'v' and 'expr').
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param X 'x' part of the associated expression/statement.
/// \param V 'v' part of the associated expression/statement.
/// \param E 'expr' part of the associated expression/statement.
/// \param UE Helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
/// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
/// second.
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
static OMPAtomicDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Get 'x' part of the associated expression/statement.
Expr *getX() {
return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_X]);
}
const Expr *getX() const {
return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_X]);
}
  /// Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// Returns null if no update expression was set.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(
        Data->getChildren()[DataPositionTy::POS_UpdateExpr]);
  }
  /// Const variant of getUpdateExpr().
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(
        Data->getChildren()[DataPositionTy::POS_UpdateExpr]);
  }
  /// Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// In other words, reports whether 'x' appears on the LHS of the binop.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'
  /// (postfix-style capture, as in 'v = x++').
  bool isPostfixUpdate() const { return IsPostfixUpdate; }
  /// Get 'v' part of the associated expression/statement.
  /// Returns null if the 'v' child has not been set.
  Expr *getV() {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]);
  }
  /// Const variant of getV().
  const Expr *getV() const {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]);
  }
  /// Get 'expr' part of the associated expression/statement.
  /// Returns null if the 'expr' child has not been set.
  Expr *getExpr() {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]);
  }
  /// Const variant of getExpr().
  const Expr *getExpr() const {
    return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]);
  }
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};
/// This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target,
                               StartLoc, EndLoc) {}
  /// Build an empty directive with invalid source locations.
  ///
  explicit OMPTargetDirective()
      : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target,
                               SourceLocation(), SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};
/// This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetDataDirectiveClass,
                               llvm::omp::OMPD_target_data, StartLoc, EndLoc) {}
  /// Build an empty directive with invalid source locations.
  ///
  explicit OMPTargetDataDirective()
      : OMPExecutableDirective(OMPTargetDataDirectiveClass,
                               llvm::omp::OMPD_target_data, SourceLocation(),
                               SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                             EmptyShell);
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDataDirectiveClass;
  }
};
/// This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass,
                               llvm::omp::OMPD_target_enter_data, StartLoc,
                               EndLoc) {}
  /// Build an empty directive with invalid source locations.
  ///
  explicit OMPTargetEnterDataDirective()
      : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass,
                               llvm::omp::OMPD_target_enter_data,
                               SourceLocation(), SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetEnterDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned N, EmptyShell);
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
  }
};
/// This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetExitDataDirectiveClass,
                               llvm::omp::OMPD_target_exit_data, StartLoc,
                               EndLoc) {}
  /// Build an empty directive with invalid source locations.
  ///
  explicit OMPTargetExitDataDirective()
      : OMPExecutableDirective(OMPTargetExitDataDirectiveClass,
                               llvm::omp::OMPD_target_exit_data,
                               SourceLocation(), SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetExitDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned N, EmptyShell);
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if' with
/// condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetParallelDirectiveClass,
                               llvm::omp::OMPD_target_parallel, StartLoc,
                               EndLoc) {}
  /// Build an empty directive with invalid source locations.
  ///
  explicit OMPTargetParallelDirective()
      : OMPExecutableDirective(OMPTargetParallelDirectiveClass,
                               llvm::omp::OMPD_target_parallel,
                               SourceLocation(), SourceLocation()) {}
  /// Sets special task reduction descriptor.
  /// Stored as the first child expression slot.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
  /// Returns special task reduction reference expression.
  /// May return null if no task reduction descriptor was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  /// Const variant; delegates to the non-const overload.
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetParallelDirective *>(this)
        ->getTaskReductionRefExpr();
  }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPTargetParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if current region has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum) {}
  /// Build an empty directive with invalid source locations.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}
  /// Sets special task reduction descriptor.
  /// Stored in the child slot immediately after the loop-related children.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)] = E;
  }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);
  /// Returns special task reduction reference expression.
  /// May return null if no task reduction descriptor was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)]);
  }
  /// Const variant; delegates to the non-const overload.
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams,
                               StartLoc, EndLoc) {}
  /// Build an empty directive with invalid source locations.
  ///
  explicit OMPTeamsDirective()
      : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams,
                               SourceLocation(), SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};
/// This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Kind of the construct region the cancellation point applies to.
  OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPCancellationPointDirectiveClass,
                               llvm::omp::OMPD_cancellation_point, StartLoc,
                               EndLoc) {}
  /// Build an empty directive with invalid source locations.
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(OMPCancellationPointDirectiveClass,
                               llvm::omp::OMPD_cancellation_point,
                               SourceLocation(), SourceLocation()) {}
  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CancelRegion Kind of the region the cancellation point applies to.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);
  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);
  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};
/// This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Kind of the construct region the cancel applies to.
  OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel,
                               StartLoc, EndLoc) {}
  /// Build an empty directive with invalid source locations.
  ///
  explicit OMPCancelDirective()
      : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel,
                               SourceLocation(), SourceLocation()) {}
  /// Set cancel region for current cancel directive.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param CancelRegion Kind of the region the cancel applies to.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);
  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// Get cancellation region for the current cancel directive.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};
/// This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop,
                         StartLoc, EndLoc, CollapsedNum) {}
  /// Build an empty directive with invalid source locations.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses,
                                           unsigned CollapsedNum, EmptyShell);
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc,
                         CollapsedNum) {}
  /// Build an empty directive with invalid source locations.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_taskloop_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};
/// This represents '#pragma omp master taskloop' directive.
///
/// \code
/// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc,
                         CollapsedNum) {}
  /// Build an empty directive with invalid source locations.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_master_taskloop, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp master taskloop simd' directive.
///
/// \code
/// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop simd' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                 unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, StartLoc, EndLoc,
                         CollapsedNum) {}
  /// Build an empty directive with invalid source locations.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                                     unsigned NumClauses,
                                                     unsigned CollapsedNum,
                                                     EmptyShell);
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass;
  }
};
/// This represents '#pragma omp parallel master taskloop' directive.
///
/// \code
/// #pragma omp parallel master taskloop private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop, StartLoc,
                         EndLoc, CollapsedNum) {}
  /// Build an empty directive with invalid source locations.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                         unsigned NumClauses,
                                                         unsigned CollapsedNum,
                                                         EmptyShell);
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Supports LLVM-style RTTI (isa/dyn_cast) over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp parallel master taskloop simd' directive.
///
/// \code
/// #pragma omp parallel master taskloop simd private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop simd' has
/// clauses 'private' with the variables 'a' and 'b', 'grainsize' with
/// expression 'val' and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
llvm::omp::OMPD_parallel_master_taskloop_simd,
StartLoc, EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
llvm::omp::OMPD_parallel_master_taskloop_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPParallelMasterTaskLoopSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPParallelMasterTaskLoopSimdDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass;
}
};
/// This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeDirectiveClass,
llvm::omp::OMPD_distribute, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPDistributeDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeDirectiveClass,
llvm::omp::OMPD_distribute, SourceLocation(),
SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPDistributeDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPDistributeDirectiveClass;
}
};
/// This represents '#pragma omp target update' directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to' with
/// argument 'a', clause 'from' with argument 'b' and clause 'device' with
/// argument '1'.
///
/// Note: unlike the loop directives in this file, this is a stand-alone
/// executable directive (no associated loop), hence no CollapsedNum.
class OMPTargetUpdateDirective : public OMPExecutableDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
llvm::omp::OMPD_target_update, StartLoc,
EndLoc) {}
/// Build an empty directive.
///
explicit OMPTargetUpdateDirective()
: OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
llvm::omp::OMPD_target_update, SourceLocation(),
SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetUpdateDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses The number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
}
};
/// This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate or mutate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPDistributeParallelForDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
llvm::omp::OMPD_distribute_parallel_for, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPDistributeParallelForDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
llvm::omp::OMPD_distribute_parallel_for,
SourceLocation(), SourceLocation(), CollapsedNum) {}
/// Sets special task reduction descriptor.
///
/// The descriptor occupies the first child slot after the loop-related
/// children for this directive kind (hence the numLoopChildren() index).
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(
getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)] = E;
}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPDistributeParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// Returns special task reduction reference expression.
///
/// May return null if no task reduction descriptor was stored.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)]);
}
/// Const overload; delegates to the non-const version.
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPDistributeParallelForDirective *>(this)
->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
}
};
/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
llvm::omp::OMPD_distribute_parallel_for_simd, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
llvm::omp::OMPD_distribute_parallel_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPDistributeParallelForSimdDirective *Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPDistributeParallelForSimdDirective *CreateEmpty(
const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
}
};
/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variables 'x'
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeSimdDirectiveClass,
llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPDistributeSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeSimdDirectiveClass,
llvm::omp::OMPD_distribute_simd, SourceLocation(),
SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPDistributeSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
}
};
/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has clauses
/// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen'
/// with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTargetParallelForSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
llvm::omp::OMPD_target_parallel_for_simd, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
llvm::omp::OMPD_target_parallel_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
}
};
/// This represents '#pragma omp target simd' directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target simd' has clauses 'private'
/// with the variable 'a', 'map' with the variable 'b' and 'safelen' with
/// the variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetSimdDirectiveClass,
llvm::omp::OMPD_target_simd, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTargetSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetSimdDirectiveClass,
llvm::omp::OMPD_target_simd, SourceLocation(),
SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetSimdDirectiveClass;
}
};
/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTeamsDistributeDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
llvm::omp::OMPD_teams_distribute, SourceLocation(),
SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTeamsDistributeDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
}
};
/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
llvm::omp::OMPD_teams_distribute_simd, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
llvm::omp::OMPD_teams_distribute_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTeamsDistributeSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
}
};
/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForSimdDirective final
: public OMPLoopDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
llvm::omp::OMPD_teams_distribute_parallel_for_simd,
StartLoc, EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
llvm::omp::OMPD_teams_distribute_parallel_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTeamsDistributeParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPTeamsDistributeParallelForSimdDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass;
}
};
/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate or mutate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
llvm::omp::OMPD_teams_distribute_parallel_for,
StartLoc, EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
llvm::omp::OMPD_teams_distribute_parallel_for,
SourceLocation(), SourceLocation(), CollapsedNum) {}
/// Sets special task reduction descriptor.
///
/// The descriptor occupies the first child slot after the loop-related
/// children for this directive kind (hence the numLoopChildren() index).
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(
getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)] = E;
}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPTeamsDistributeParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPTeamsDistributeParallelForDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// Returns special task reduction reference expression.
///
/// May return null if no task reduction descriptor was stored.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)]);
}
/// Const overload; delegates to the non-const version.
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPTeamsDistributeParallelForDirective *>(this)
->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
}
};
/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
/// Note: this is a (non-loop) executable directive, so there is no
/// CollapsedNum parameter.
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
llvm::omp::OMPD_target_teams, StartLoc, EndLoc) {
}
/// Build an empty directive.
///
explicit OMPTargetTeamsDirective()
: OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
llvm::omp::OMPD_target_teams, SourceLocation(),
SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetTeamsDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
}
};
/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
// Private construction: only Create()/CreateEmpty() below and the friended
// AST machinery may instantiate this node.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
llvm::omp::OMPD_target_teams_distribute, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
llvm::omp::OMPD_target_teams_distribute,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetTeamsDistributeDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// (The unnamed EmptyShell argument is a tag selecting this "empty"
/// overload; the node is filled in afterwards, e.g. by the ASTStmtReader.)
static OMPTargetTeamsDistributeDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI support (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass;
}
};
/// This represents '#pragma omp target teams distribute parallel for' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for' has clause 'private' with the variable 'x'.
///
class OMPTargetTeamsDistributeParallelForDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                               SourceLocation EndLoc,
                                               unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive (used during AST deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  /// The descriptor is stored in the child slot that follows the loop-related
  /// children (see numLoopChildren).
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(),
        llvm::omp::OMPD_target_teams_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(),
        llvm::omp::OMPD_target_teams_distribute_parallel_for)]);
  }
  /// Const overload of \a getTaskReductionRefExpr().
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetTeamsDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for simd'
/// combined directive.
///
/// \code
/// #pragma omp target teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for simd' has clause 'private' with the variable 'x'.
///
class OMPTargetTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                                   SourceLocation EndLoc,
                                                   unsigned CollapsedNum)
      : OMPLoopDirective(
            OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, StartLoc,
            EndLoc, CollapsedNum) {}

  /// Build an empty directive (used during AST deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeParallelForSimdDirective(
      unsigned CollapsedNum)
      : OMPLoopDirective(
            OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            llvm::omp::OMPD_target_teams_distribute_parallel_for_simd,
            SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute simd' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute simd'
/// has clause 'private' with the variable 'x'.
///
class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive (used during AST deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents the '#pragma omp tile' loop transformation directive.
class OMPTileDirective final : public OMPLoopBasedDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Offsets of child statements in Data->getChildren().
  enum {
    PreInitsOffset = 0,
    TransformedStmtOffset,
  };

  explicit OMPTileDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                            unsigned NumLoops)
      : OMPLoopBasedDirective(OMPTileDirectiveClass, llvm::omp::OMPD_tile,
                              StartLoc, EndLoc, NumLoops) {}

  /// Set the pre-init statements.
  void setPreInits(Stmt *PreInits) {
    Data->getChildren()[PreInitsOffset] = PreInits;
  }

  /// Set the de-sugared (tiled) statement.
  void setTransformedStmt(Stmt *S) {
    Data->getChildren()[TransformedStmtOffset] = S;
  }

public:
  /// Create a new AST node representation for '#pragma omp tile'.
  ///
  /// \param C Context of the AST.
  /// \param StartLoc Location of the introducer (e.g. the 'omp' token).
  /// \param EndLoc Location of the directive's end (e.g. the tok::eod).
  /// \param Clauses The directive's clauses.
  /// \param NumLoops Number of associated loops (number of items in the
  /// 'sizes' clause).
  /// \param AssociatedStmt The outermost associated loop.
  /// \param TransformedStmt The loop nest after tiling, or nullptr in
  /// dependent contexts.
  /// \param PreInits Helper preinits statements for the loop nest.
  static OMPTileDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  unsigned NumLoops, Stmt *AssociatedStmt,
                                  Stmt *TransformedStmt, Stmt *PreInits);

  /// Build an empty '#pragma omp tile' AST node for deserialization.
  ///
  /// \param C Context of the AST.
  /// \param NumClauses Number of clauses to allocate.
  /// \param NumLoops Number of associated loops to allocate.
  static OMPTileDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned NumLoops);

  unsigned getNumAssociatedLoops() const { return getLoopsNumber(); }

  /// Gets/sets the associated loops after tiling.
  ///
  /// This is in de-sugared format stored as a CompoundStmt.
  ///
  /// \code
  /// for (...)
  /// ...
  /// \endcode
  ///
  /// Note that if the generated loops become associated loops of another
  /// directive, they may need to be hoisted before them.
  Stmt *getTransformedStmt() const {
    return Data->getChildren()[TransformedStmtOffset];
  }

  /// Return preinits statement.
  Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTileDirectiveClass;
  }
};
/// This represents the '#pragma omp unroll' loop transformation directive.
///
/// \code
/// #pragma omp unroll
/// for (int i = 0; i < 64; ++i)
/// \endcode
class OMPUnrollDirective final : public OMPLoopBasedDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Offsets of child statements in Data->getChildren().
  enum {
    PreInitsOffset = 0,
    TransformedStmtOffset,
  };

  explicit OMPUnrollDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPLoopBasedDirective(OMPUnrollDirectiveClass, llvm::omp::OMPD_unroll,
                              StartLoc, EndLoc, 1) {}

  /// Set the pre-init statements.
  void setPreInits(Stmt *PreInits) {
    Data->getChildren()[PreInitsOffset] = PreInits;
  }

  /// Set the de-sugared statement.
  void setTransformedStmt(Stmt *S) {
    Data->getChildren()[TransformedStmtOffset] = S;
  }

public:
  /// Create a new AST node representation for '#pragma omp unroll'.
  ///
  /// \param C Context of the AST.
  /// \param StartLoc Location of the introducer (e.g. the 'omp' token).
  /// \param EndLoc Location of the directive's end (e.g. the tok::eod).
  /// \param Clauses The directive's clauses.
  /// \param AssociatedStmt The outermost associated loop.
  /// \param TransformedStmt The loop nest after unrolling, or nullptr in
  /// dependent contexts.
  /// \param PreInits Helper preinits statements for the loop nest.
  static OMPUnrollDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Stmt *TransformedStmt, Stmt *PreInits);

  /// Build an empty '#pragma omp unroll' AST node for deserialization.
  ///
  /// \param C Context of the AST.
  /// \param NumClauses Number of clauses to allocate.
  static OMPUnrollDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses);

  /// Get the de-sugared associated loops after unrolling.
  ///
  /// This is only used if the unrolled loop becomes an associated loop of
  /// another directive, otherwise the loop is emitted directly using loop
  /// transformation metadata. When the unrolled loop cannot be used by another
  /// directive (e.g. because of the full clause), the transformed stmt can also
  /// be nullptr.
  Stmt *getTransformedStmt() const {
    return Data->getChildren()[TransformedStmtOffset];
  }

  /// Return the pre-init statements.
  Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPUnrollDirectiveClass;
  }
};
/// This represents '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive' with
/// list item 'a'.
class OMPScanDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPScanDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
                               StartLoc, EndLoc) {}

  /// Build an empty directive (used during AST deserialization).
  ///
  explicit OMPScanDirective()
      : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  /// NOTE(review): the clause restriction above looks copied from the flush
  /// directive; the example at the top of this class uses an 'inclusive'
  /// clause -- confirm which clauses Sema actually accepts here.
  ///
  static OMPScanDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPScanDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPScanDirectiveClass;
  }
};
/// This represents '#pragma omp interop' directive.
///
/// \code
/// #pragma omp interop init(target:obj) device(x) depend(inout:y) nowait
/// \endcode
/// In this example directive '#pragma omp interop' has
/// clauses 'init', 'device', 'depend' and 'nowait'.
///
class OMPInteropDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPInteropDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPInteropDirectiveClass,
                               llvm::omp::OMPD_interop, StartLoc, EndLoc) {}

  /// Build an empty directive (used during AST deserialization).
  ///
  explicit OMPInteropDirective()
      : OMPExecutableDirective(OMPInteropDirectiveClass,
                               llvm::omp::OMPD_interop, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses The directive's clauses.
  ///
  static OMPInteropDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses to allocate.
  ///
  static OMPInteropDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPInteropDirectiveClass;
  }
};
/// This represents '#pragma omp dispatch' directive.
///
/// \code
/// #pragma omp dispatch device(dnum)
/// \endcode
/// This example shows a directive '#pragma omp dispatch' with a
/// device clause with variable 'dnum'.
///
class OMPDispatchDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// The location of the target-call.
  SourceLocation TargetCallLoc;

  /// Set the location of the target-call.
  void setTargetCallLoc(SourceLocation Loc) { TargetCallLoc = Loc; }

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPDispatchDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPDispatchDirectiveClass,
                               llvm::omp::OMPD_dispatch, StartLoc, EndLoc) {}

  /// Build an empty directive (used during AST deserialization).
  ///
  explicit OMPDispatchDirective()
      : OMPExecutableDirective(OMPDispatchDirectiveClass,
                               llvm::omp::OMPD_dispatch, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TargetCallLoc Location of the target-call.
  ///
  static OMPDispatchDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         SourceLocation TargetCallLoc);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDispatchDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Return location of target-call.
  SourceLocation getTargetCallLoc() const { return TargetCallLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDispatchDirectiveClass;
  }
};
/// This represents '#pragma omp masked' directive.
/// \code
/// #pragma omp masked filter(tid)
/// \endcode
/// This example shows a directive '#pragma omp masked' with a filter clause
/// with variable 'tid'.
///
class OMPMaskedDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMaskedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked,
                               StartLoc, EndLoc) {}

  /// Build an empty directive (used during AST deserialization).
  ///
  explicit OMPMaskedDirective()
      : OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses The directive's clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMaskedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses to allocate.
  ///
  static OMPMaskedDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMaskedDirectiveClass;
  }
};
/// This represents '#pragma omp metadirective' directive.
///
/// \code
/// #pragma omp metadirective when(user={condition(N>10)}: parallel for)
/// \endcode
/// In this example directive '#pragma omp metadirective' has clauses 'when'
/// with a dynamic user condition to check if a variable 'N > 10'
///
class OMPMetaDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// The statement selected for this metadirective.
  /// NOTE(review): not initialized by either constructor; presumably always
  /// assigned via setIfStmt() during creation/deserialization -- confirm.
  Stmt *IfStmt;

  /// Build directive with the given start and end location.
  OMPMetaDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPMetaDirectiveClass,
                               llvm::omp::OMPD_metadirective, StartLoc,
                               EndLoc) {}

  /// Build an empty directive (used during AST deserialization).
  explicit OMPMetaDirective()
      : OMPExecutableDirective(OMPMetaDirectiveClass,
                               llvm::omp::OMPD_metadirective, SourceLocation(),
                               SourceLocation()) {}

  /// Set the selected statement.
  void setIfStmt(Stmt *S) { IfStmt = S; }

public:
  /// Creates directive with a list of \a Clauses and the selected statement.
  static OMPMetaDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt, Stmt *IfStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  static OMPMetaDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);

  /// Return the statement selected for this metadirective.
  Stmt *getIfStmt() const { return IfStmt; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMetaDirectiveClass;
  }
};
} // end namespace clang
#endif
|
SimulatorBase.h | /*
Menge Crowd Simulation Framework
Copyright and trademark 2012-17 University of North Carolina at Chapel Hill
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
or
LICENSE.txt in the root of the Menge repository.
Any questions or comments should be sent to the authors menge@cs.unc.edu
<http://gamma.cs.unc.edu/Menge/>
*/
#ifndef __SIMULATOR_BASE_H__
#define __SIMULATOR_BASE_H__
/*!
@file SimulatorBase.h
@brief Contains the SimulatorBase class - the common, generic simulator to work with different
types of agents. It is templated on the Agent type.
*/
#include "MengeCore/Agents/AgentInitializer.h"
#include "MengeCore/Agents/SimulatorInterface.h"
#include "MengeCore/Agents/SpatialQueries/SpatialQuery.h"
#include "MengeCore/Runtime/Utils.h"
#include "MengeCore/mengeCommon.h"
#include <vector>
#if HAVE_OPENMP || _OPENMP
#include <omp.h>
#endif
namespace Menge {
namespace Agents {
/*!
 @brief   Defines the basic simulator. It is responsible for tracking agents and obstacles as
          well as initializing such from files.
 */
template <class Agent>
class SimulatorBase : public SimulatorInterface {
 public:
  /*!
   @brief   Constructs a simulator instance.
   */
  SimulatorBase();

  /*!
   @brief   Destroys a simulator instance.
   */
  ~SimulatorBase();

  /*!
   @brief   Lets the simulator perform a simulation step and updates the two-dimensional
            position and two-dimensional velocity of each agent.
   */
  void doStep();

  /*!
   @brief   Initialize spatial query structure.
   */
  virtual bool initSpatialQuery();

  /*!
   @brief   After all agents and all obstacles have been added to the scene does the work to finish
            preparing the simulation to be run.

   This work is performed when the simulator is done being initialized. If a particular new
   pedestrian simulator requires particular finalization work, this function should be sub-classed
   and the parent class's version of the function should be explicitly called before any additional
   work is performed.
   */
  virtual void finalize();

  /*!
   @brief   Accessor for agents.

   @param   agentNo    The number of the agent who is to be retrieved. This is *not* the
                       same as the agent identifier. It is merely the local index of the agent
                       in the simulator's local store.
   @returns A pointer to the agent.
   */
  virtual BaseAgent* getAgent(size_t agentNo) { return &_agents[agentNo]; }

  /*!
   @brief   Const accessor for agents.

   @param   agentNo    The number of the agent who is to be retrieved. This is *not* the same
                       as the agent identifier. It is merely the local index of the agent in the
                       simulator's local store.
   @returns A pointer to the agent.
   */
  virtual const BaseAgent* getAgent(size_t agentNo) const { return &_agents[agentNo]; }

  /*!
   @brief   Add an agent with specified position to the simulator whose properties are defined by
            the given agent initializer.

   It uses the agent initializer to define the values of the remaining agent parameters.

   @param   pos         The 2d vector representing the agent's position.
   @param   agentInit   The AgentInitializer necessary to parse AgentSet properties.
   @returns A pointer to the agent (if initialization was successful) or NULL if failed.
   */
  virtual BaseAgent* addAgent(const Vector2& pos, AgentInitializer* agentInit);

  /*!
   @brief   Returns the count of agents in the simulation.

   @returns The count of agents in the simulation.
   */
  virtual size_t getNumAgents() const { return _agents.size(); }

  /*!
   @brief   Reports if there are non-common Experiment parameters that this simulator requires in
            the XML file.

   @returns By default, the simulator base ONLY uses common parameters. Always returns false.
   */
  virtual bool hasExpTarget() { return false; }

  /*!
   @brief   Reports if the given Experiment attribute tag name belongs to this simulator.

   @param   tagName    The name of the candidate experiment XML tag.
   @returns By default, the simulator base ONLY uses common parameters. Always returns false.
   */
  virtual bool isExpTarget(const std::string& tagName) { return false; }

  /*!
   @brief   Given an Experiment parameter name and value, sets the appropriate simulator
            parameter.

   // TODO: Define the conditions of success/failure.

   @param   paramName   A string containing the parameter name for the experiment.
   @param   value       A string containing the value for the parameter.
   @returns True if the parameter was successfully set, false otherwise.
   */
  virtual bool setExpParam(const std::string& paramName,
                           const std::string& value) throw(XMLParamException);

 protected:
  /*!
   @brief   Computes the neighbors for the given agent.

   @param   agent    The agent whose neighbors are to be computed.
   */
  void computeNeighbors(Agent* agent);

  /*!
   @brief   The collection of agents in the simulation.
   */
  std::vector<Agent> _agents;
};
////////////////////////////////////////////////////////////////
// Implementation of SimulatorBase
////////////////////////////////////////////////////////////////
template <class Agent>
SimulatorBase<Agent>::SimulatorBase() : SimulatorInterface() {
  // _agents is default-constructed empty; nothing else to do here.
}
////////////////////////////////////////////////////////////////
template <class Agent>
SimulatorBase<Agent>::~SimulatorBase() {
  // Explicitly empty the agent store; the vector would destroy its elements
  // anyway, but this mirrors the original cleanup order.
  _agents.clear();
}
////////////////////////////////////////////////////////////////
/*!
 Advances the simulation by one logical time step: refreshes the spatial query,
 computes per-agent neighbors and preferred velocities in parallel, then
 integrates every non-external agent forward by TIME_STEP.
 */
template <class Agent>
void SimulatorBase<Agent>::doStep() {
  assert(_spatialQuery != 0x0 && "Can't run without a spatial query instance defined");
  _spatialQuery->updateAgents();
  const int agentCount = static_cast<int>(_agents.size());
  // Phase 1: neighbor queries and new-velocity computation.
#pragma omp parallel for
  for (int a = 0; a < agentCount; ++a) {
    Agent& agt = _agents[a];
    computeNeighbors(&agt);
    if (!agt._external) agt.computeNewVelocity();
  }
  // Phase 2: state integration (externally-driven agents are skipped).
#pragma omp parallel for
  for (int a = 0; a < agentCount; ++a) {
    Agent& agt = _agents[a];
    if (!agt._external) agt.update(TIME_STEP);
  }
  _globalTime += TIME_STEP;
}
////////////////////////////////////////////////////////////////
/*!
 Hands every agent (as a BaseAgent pointer) to the spatial query structure and
 lets it preprocess the obstacle set. Requires _spatialQuery to be set.
 */
template <class Agent>
bool SimulatorBase<Agent>::initSpatialQuery() {
  assert(_spatialQuery != 0x0 && "Can't run without a spatial query instance defined");
  std::vector<BaseAgent*> agentPtrs;
  agentPtrs.reserve(_agents.size());
  for (size_t a = 0; a < _agents.size(); ++a) {
    agentPtrs.push_back(&_agents[a]);
  }
  _spatialQuery->setAgents(agentPtrs);
  _spatialQuery->processObstacles();
  return true;
}
////////////////////////////////////////////////////////////////
/*!
 Finishes simulator preparation: runs the base-class finalization, then gives
 each agent a chance to initialize itself.
 */
template <class Agent>
void SimulatorBase<Agent>::finalize() {
  SimulatorInterface::finalize();
  const size_t agentCount = _agents.size();
  for (size_t a = 0; a < agentCount; ++a) {
    _agents[a].initialize();
  }
}
////////////////////////////////////////////////////////////////
/*!
 Creates a new agent at the given position, assigns it the next sequential id,
 applies the initializer's properties, and stores it. Returns a pointer into
 the simulator's store on success, NULL on initialization failure.
 */
template <class Agent>
BaseAgent* SimulatorBase<Agent>::addAgent(const Vector2& pos, AgentInitializer* agentInit) {
  Agent newAgent;
  newAgent._id = _agents.size();  // id == index of the about-to-be-added agent
  newAgent._pos = pos;
  if (!agentInit->setProperties(&newAgent)) {
    logger << Logger::ERR_MSG << "Error initializing agent " << newAgent._id << "\n";
    return 0x0;
  }
  _agents.push_back(newAgent);
  return &_agents.back();
}
////////////////////////////////////////////////////////////////
/*!
 Sets a common experiment parameter. Only "time_step" is consumed at this
 level; any other name is reported as unhandled so sub-classes can try it.

 @param   paramName   A string containing the parameter name for the experiment.
 @param   value       A string containing the value for the parameter.
 @returns True if the parameter was successfully set, false otherwise.
 @throws  XMLParamException if the value cannot be parsed as a float.
 */
template <class Agent>
bool SimulatorBase<Agent>::setExpParam(const std::string& paramName,
                                       const std::string& value) throw(XMLParamException) {
  if (paramName != "time_step") {
    // Not a parameter handled by the base simulator.
    return false;
  }
  try {
    LOGICAL_TIME_STEP = toFloat(value);
  } catch (const UtilException&) {
    // Catch by const reference (the original caught by value, which copies the
    // exception and would slice any derived exception type).
    throw XMLParamException(
        std::string("Common parameters \"time_step\" value couldn't be converted "
                    "to a float. Found the value: ") +
        value);
  }
  return true;
}
////////////////////////////////////////////////////////////////
/*!
 Populates the given agent's neighbor data: resets its query state, gathers
 nearby obstacles, and — only when the agent wants neighbors — nearby agents.
 */
template <class Agent>
void SimulatorBase<Agent>::computeNeighbors(Agent* agent) {
  agent->startQuery();
  // Obstacles are always queried.
  _spatialQuery->obstacleQuery(agent);
  // Agent neighbors are only gathered when the agent asks for any.
  if (agent->_maxNeighbors > 0) _spatialQuery->agentQuery(agent);
}
} // namespace Agents
} // namespace Menge
#endif // __SIMULATOR_BASE_H__
|
GB_binop__bshift_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// Macro configuration for this generated kernel file.  The function bodies
// below are shared templates (#include'd .c files); these macros specialize
// them for: C = bitshift(A,B) with C,A of type int16_t and B (shift count)
// of type int8_t.
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__bshift_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__bshift_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_int16)
// C=scalar+B GB (_bind1st__bshift_int16)
// C=scalar+B' GB (_bind1st_tran__bshift_int16)
// C=A+scalar GB (_bind2nd__bshift_int16)
// C=A'+scalar GB (_bind2nd_tran__bshift_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_bitshift_int16 (aij, bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// NOTE(review): the two *_IS_PATTERN macros below end their body line with a
// trailing backslash, so the following //-comment line is spliced into the
// macro definition.  Harmless (comments are stripped after line splicing, so
// each macro still expands to 0), but it means the spliced comment no longer
// documents the macro that follows it — fresh copies are added below.
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_int16 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_INT16 || GxB_NO_BSHIFT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled (#if 0): C += A+B for all-dense matrices is only generated for
// the accumulable operators listed below; BSHIFT is not one of them, so the
// generator emitted this stub with name "(none)".
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The actual loop comes from the
// shared template, specialized by the GB_* macros defined above in this file.
void GB (_Cdense_ewise3_noaccum__bshift_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out
// (GB_DISABLE), telling the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_accumB__bshift_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (type int8_t, the B/shift-count type) into a
// dense matrix C.
GrB_Info GB (_Cdense_accumb__bshift_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the return inside the braces above always
// fires first.  Harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): column scale C = A*D is not generated for BSHIFT, so the
// generator emitted this stub with name "(none)".
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): row scale C = D*B is not generated for BSHIFT, so the
// generator emitted this stub with name "(none)".
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B (union of patterns).  When
// is_eWiseUnion is true, alpha/beta scalars supply the "missing" operand
// value where only one of A/B has an entry; they are only unpacked in that
// case, so the pointers may otherwise be unused.
GrB_Info GB (_AaddB__bshift_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with optional mask, where C is sparse or
// hypersparse (intersection of patterns).
GrB_Info GB (_AemultB_08__bshift_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  BSHIFT is non-commutative and has no flipped variant
// (GB_BINOP_FLIP is 1), so flipxy selects between fmult(x,y) and fmult(y,x)
// via the template's GB_FLIPPED switch.
GrB_Info GB (_AemultB_02__bshift_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full.
GrB_Info GB (_AemultB_04__bshift_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__bshift_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = op (x, Bx [p]): apply the binary op with the scalar bound to the
// first argument.  Bb is B's bitmap (NULL when B is full); entries whose
// bitmap bit is clear are skipped.  Cx and Bx may be the same array.
GrB_Info GB (_bind1st__bshift_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_bitshift_int16 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = op (Ax [p], y): apply the binary op with the scalar bound to the
// second argument (the shift count).  Ab is A's bitmap (NULL when A is
// full).  Cx and Ax may be the same array.
GrB_Info GB (_bind2nd__bshift_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_bitshift_int16 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// C = op (x, A'): transpose A and apply the op with scalar x bound first.
// GB_CAST_OP is redefined here so the shared transpose template computes
// cij = bitshift(x, aij); note aij is int8_t (A plays the role of B/y here).
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int16 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__bshift_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// C = op (A', y): transpose A and apply the op with scalar y (the shift
// count, int8_t) bound second.  GB_CAST_OP is redefined so the shared
// transpose template computes cij = bitshift(aij, y).
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int16 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__bshift_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Matrix.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matrix - Matrix stored and accessible by rows. Indices and values for
* the matrix nonzeros are copied into the matrix a row at a time, in any
* order using the MatrixGetRow function. The MatrixPutRow function returns
* a pointer to the indices and values of a row. The matrix has a set of
* row and column indices such that these indices begin at "beg" and end
* at "end", where 0 <= "beg" <= "end". In other words, the matrix indices
* have any nonnegative base value, and the base values of the row and column
* indices must agree.
*
*****************************************************************************/
#include <stdlib.h>
#include <memory.h>
#include "Common.h"
#include "Matrix.h"
#include "Numbering.h"
#define MAX_NZ_PER_ROW 1000
/*--------------------------------------------------------------------------
* MatrixCreate - Return (a pointer to) a matrix object.
*--------------------------------------------------------------------------*/
/* Allocate and return a global (distributed) matrix object owning rows
 * beg_row..end_row inclusive on this rank.  Gathers every rank's row range
 * so rows can later be mapped to owning processors (MatrixRowPe).
 * Communication state is left NULL/zero until MatrixComplete is called. */
Matrix *MatrixCreate(MPI_Comm comm, HYPRE_Int beg_row, HYPRE_Int end_row)
{
    HYPRE_Int nrows, mype, npes;
    Matrix *mat;

    mat = hypre_TAlloc(Matrix, 1, HYPRE_MEMORY_HOST);
    mat->comm    = comm;
    mat->beg_row = beg_row;
    mat->end_row = end_row;
    mat->mem     = (Mem *) MemCreate();

    /* Per-row storage: lengths, column-index arrays, value arrays */
    nrows = end_row - beg_row + 1;
    mat->lens = (HYPRE_Int *)   MemAlloc(mat->mem, nrows * sizeof(HYPRE_Int));
    mat->inds = (HYPRE_Int **)  MemAlloc(mat->mem, nrows * sizeof(HYPRE_Int *));
    mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, nrows * sizeof(HYPRE_Real *));

    /* Send beg_row and end_row to all processors */
    /* This is needed in order to map row numbers to processors */
    hypre_MPI_Comm_rank(comm, &mype);
    hypre_MPI_Comm_size(comm, &npes);
    mat->beg_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int));
    mat->end_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int));
    hypre_MPI_Allgather(&beg_row, 1, HYPRE_MPI_INT, mat->beg_rows, 1, HYPRE_MPI_INT, comm);
    hypre_MPI_Allgather(&end_row, 1, HYPRE_MPI_INT, mat->end_rows, 1, HYPRE_MPI_INT, comm);

    /* Filled in later by MatrixComplete */
    mat->num_recv  = 0;
    mat->num_send  = 0;
    mat->recv_req  = NULL;
    mat->send_req  = NULL;
    mat->recv_req2 = NULL;
    mat->send_req2 = NULL;
    mat->statuses  = NULL;
    mat->sendind   = NULL;
    mat->sendbuf   = NULL;
    mat->recvbuf   = NULL;
    mat->numb      = NULL;

    return mat;
}
/*--------------------------------------------------------------------------
* MatrixCreateLocal - Return (a pointer to) a matrix object.
* The matrix created by this call is a local matrix, not a global matrix.
*--------------------------------------------------------------------------*/
/* Allocate and return a purely local (non-distributed) matrix object with
 * rows beg_row..end_row inclusive.  No communicator is attached
 * (comm = hypre_MPI_COMM_NULL) and no row-range table is built, so this
 * matrix cannot be used with the MPI-based routines in this file. */
Matrix *MatrixCreateLocal(HYPRE_Int beg_row, HYPRE_Int end_row)
{
    HYPRE_Int nrows;
    Matrix *mat;

    mat = hypre_TAlloc(Matrix, 1, HYPRE_MEMORY_HOST);
    mat->comm    = hypre_MPI_COMM_NULL;
    mat->beg_row = beg_row;
    mat->end_row = end_row;
    mat->mem     = (Mem *) MemCreate();

    /* Per-row storage: lengths, column-index arrays, value arrays */
    nrows = end_row - beg_row + 1;
    mat->lens = (HYPRE_Int *)   MemAlloc(mat->mem, nrows * sizeof(HYPRE_Int));
    mat->inds = (HYPRE_Int **)  MemAlloc(mat->mem, nrows * sizeof(HYPRE_Int *));
    mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, nrows * sizeof(HYPRE_Real *));

    /* No global row map and no communication state for a local matrix */
    mat->beg_rows  = NULL;
    mat->end_rows  = NULL;
    mat->num_recv  = 0;
    mat->num_send  = 0;
    mat->recv_req  = NULL;
    mat->send_req  = NULL;
    mat->recv_req2 = NULL;
    mat->send_req2 = NULL;
    mat->statuses  = NULL;
    mat->sendind   = NULL;
    mat->sendbuf   = NULL;
    mat->recvbuf   = NULL;
    mat->numb      = NULL;

    return mat;
}
/*--------------------------------------------------------------------------
* MatrixDestroy - Destroy a matrix object "mat".
*--------------------------------------------------------------------------*/
/* Destroy a matrix object "mat": free persistent MPI requests, heap buffers,
 * the row-storage memory pool, and the Numbering object.
 * Note the request counts look "crossed" but are correct: recv_req and
 * send_req2 hold num_recv requests each (created in SetupReceives), while
 * send_req and recv_req2 hold num_send requests each (created in SetupSends).
 * For a matrix that was never completed, all counts are 0 and all pointers
 * NULL, so the loops and free(NULL) calls are no-ops. */
void MatrixDestroy(Matrix *mat)
{
HYPRE_Int i;
for (i=0; i<mat->num_recv; i++)
hypre_MPI_Request_free(&mat->recv_req[i]);
for (i=0; i<mat->num_send; i++)
hypre_MPI_Request_free(&mat->send_req[i]);
/* recv_req2 was sized by SetupSends, hence num_send iterations */
for (i=0; i<mat->num_send; i++)
hypre_MPI_Request_free(&mat->recv_req2[i]);
/* send_req2 was sized by SetupReceives, hence num_recv iterations */
for (i=0; i<mat->num_recv; i++)
hypre_MPI_Request_free(&mat->send_req2[i]);
free(mat->recv_req);
free(mat->send_req);
free(mat->recv_req2);
free(mat->send_req2);
free(mat->statuses);
free(mat->sendind);
free(mat->sendbuf);
free(mat->recvbuf);
MemDestroy(mat->mem);
if (mat->numb)
NumberingDestroy(mat->numb);
free(mat);
}
/*--------------------------------------------------------------------------
* MatrixSetRow - Set a row in a matrix. Only local rows can be set.
* Once a row has been set, it should not be set again, or else the
* memory used by the existing row will not be recovered until
* the matrix is destroyed. "row" is in global coordinate numbering.
*--------------------------------------------------------------------------*/
/* Store one local row of the matrix.  "row" is a global row number; it is
 * converted to a local offset before use.  The index and value arrays are
 * copied into pool-allocated storage, so the caller keeps ownership of
 * "ind" and "val".  Setting the same row twice leaks the first allocation
 * into the pool until the matrix is destroyed (see file header). */
void MatrixSetRow(Matrix *mat, HYPRE_Int row, HYPRE_Int len, HYPRE_Int *ind, HYPRE_Real *val)
{
    HYPRE_Int r = row - mat->beg_row;   /* global -> local row index */

    mat->lens[r] = len;
    mat->inds[r] = (HYPRE_Int *)  MemAlloc(mat->mem, len*sizeof(HYPRE_Int));
    mat->vals[r] = (HYPRE_Real *) MemAlloc(mat->mem, len*sizeof(HYPRE_Real));

    if (ind != NULL)
        hypre_TMemcpy(mat->inds[r], ind, HYPRE_Int, len, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
    if (val != NULL)
        hypre_TMemcpy(mat->vals[r], val, HYPRE_Real, len, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
}
/*--------------------------------------------------------------------------
* MatrixGetRow - Get a *local* row in a matrix.
*--------------------------------------------------------------------------*/
/* Return (by reference) the length, column indices, and values of a *local*
 * row — note "row" here is a 0-based local index, unlike MatrixSetRow.
 * The returned pointers alias the matrix's internal storage. */
void MatrixGetRow(Matrix *mat, HYPRE_Int row, HYPRE_Int *lenp, HYPRE_Int **indp, HYPRE_Real **valp)
{
    *valp = mat->vals[row];
    *indp = mat->inds[row];
    *lenp = mat->lens[row];
}
/*--------------------------------------------------------------------------
* MatrixRowPe - Map "row" to a processor number.
*--------------------------------------------------------------------------*/
/* Map a global row number to the rank that owns it, by linear search of the
 * gathered [beg_rows, end_rows] ranges.  Aborts the program if the row lies
 * in no rank's range. */
HYPRE_Int MatrixRowPe(Matrix *mat, HYPRE_Int row)
{
    HYPRE_Int npes, pe;

    hypre_MPI_Comm_size(mat->comm, &npes);
    for (pe = 0; pe < npes; pe++)
    {
        if (mat->beg_rows[pe] <= row && row <= mat->end_rows[pe])
            return pe;
    }

    hypre_printf("MatrixRowPe: could not map row %d.\n", row);
    PARASAILS_EXIT;
    return -1; /* for picky compilers */
}
/*--------------------------------------------------------------------------
* MatrixNnz - Return total number of nonzeros in preconditioner.
*--------------------------------------------------------------------------*/
/* Return the total number of nonzeros across all ranks (collective call:
 * every rank in mat->comm must participate in the Allreduce). */
HYPRE_Int MatrixNnz(Matrix *mat)
{
    HYPRE_Int nlocal, i, local_total, global_total;

    /* sum the stored lengths of this rank's rows */
    nlocal = mat->end_row - mat->beg_row + 1;
    local_total = 0;
    for (i = 0; i < nlocal; i++)
        local_total += mat->lens[i];

    hypre_MPI_Allreduce(&local_total, &global_total, 1, HYPRE_MPI_INT, hypre_MPI_SUM, mat->comm);
    return global_total;
}
/*--------------------------------------------------------------------------
* MatrixPrint - Print a matrix to a file "filename". Each processor
* appends to the file in order, but the file is overwritten if it exists.
*--------------------------------------------------------------------------*/
/* Write the matrix to "filename" as "row col value" triples in global
 * coordinates.  Ranks take turns (barrier-ordered): rank 0 truncates the
 * file, every later rank appends.  Requires MatrixComplete to have run,
 * since column indices are translated back through mat->numb. */
void MatrixPrint(Matrix *mat, char *filename)
{
    HYPRE_Int mype, npes, pe;
    HYPRE_Int row, i, len, *ind;
    HYPRE_Real *val;
    HYPRE_Int last_local;
    FILE *file;

    hypre_MPI_Comm_rank(mat->comm, &mype);
    hypre_MPI_Comm_size(mat->comm, &npes);
    last_local = mat->end_row - mat->beg_row;   /* highest local row index */

    for (pe = 0; pe < npes; pe++)
    {
        hypre_MPI_Barrier(mat->comm);
        if (pe != mype)
            continue;

        file = fopen(filename, (pe == 0 ? "w" : "a"));
        hypre_assert(file != NULL);

        for (row = 0; row <= last_local; row++)
        {
            MatrixGetRow(mat, row, &len, &ind, &val);
            for (i = 0; i < len; i++)
                hypre_fprintf(file, "%d %d %.14e\n",
                    row + mat->beg_row,
                    mat->numb->local_to_global[ind[i]], val[i]);
        }
        fclose(file);
    }
}
/*--------------------------------------------------------------------------
* MatrixReadMaster - MatrixRead routine for processor 0. Internal use.
*--------------------------------------------------------------------------*/
/* MatrixRead routine for rank 0.  Two passes over the file:
 * Pass 1: scan all triples, and each time the row number reaches the first
 * row owned by the next rank, send that rank the file byte offset where its
 * rows begin (Isend of a long; receiver fseeks there).  Also verifies the
 * file is sorted by row.
 * Pass 2: rewind and read rank 0's own rows into the matrix via
 * MatrixSetRow, buffering one row at a time (at most MAX_NZ_PER_ROW
 * nonzeros per row).
 * NOTE(review): fgets/fscanf return values are largely unchecked; a
 * malformed or truncated file leads to undefined parses — assumed
 * well-formed input. */
static void MatrixReadMaster(Matrix *mat, char *filename)
{
MPI_Comm comm = mat->comm;
HYPRE_Int mype, npes;
FILE *file;
HYPRE_Int ret;
HYPRE_Int num_rows, curr_proc;
HYPRE_Int row, col;
HYPRE_Real value;
hypre_longint offset;
hypre_longint outbuf;
HYPRE_Int curr_row;
HYPRE_Int len;
HYPRE_Int ind[MAX_NZ_PER_ROW];
HYPRE_Real val[MAX_NZ_PER_ROW];
char line[100];
HYPRE_Int oldrow;
hypre_MPI_Request request;
hypre_MPI_Status status;
hypre_MPI_Comm_size(mat->comm, &npes);
hypre_MPI_Comm_rank(mat->comm, &mype);
file = fopen(filename, "r");
hypre_assert(file != NULL);
/* header line: number of rows (EMSOLVE variant also skips a per-row list) */
fgets(line, 100, file);
#ifdef EMSOLVE
ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows);
for (row=0; row<num_rows; row++)
hypre_fscanf(file, "%*d");
#else
ret = hypre_sscanf(line, "%d %*d %*d", &num_rows);
#endif
/* Pass 1: find each rank's starting byte offset and send it along */
offset = ftell(file);
hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
request = hypre_MPI_REQUEST_NULL;
curr_proc = 1; /* proc for which we are looking for the beginning */
while (curr_proc < npes)
{
if (row == mat->beg_rows[curr_proc])
{
/* wait for the previous Isend before reusing outbuf */
hypre_MPI_Wait(&request, &status);
outbuf = offset;
hypre_MPI_Isend(&outbuf, 1, hypre_MPI_LONG, curr_proc, 0, comm, &request);
curr_proc++;
}
offset = ftell(file);
oldrow = row;
hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
if (oldrow > row)
{
hypre_fprintf(stderr, "Matrix file is not sorted by rows.\n");
PARASAILS_EXIT;
}
}
/* Now read our own part */
rewind(file);
fgets(line, 100, file);
#ifdef EMSOLVE
ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows);
for (row=0; row<num_rows; row++)
hypre_fscanf(file, "%*d");
#else
ret = hypre_sscanf(line, "%d %*d %*d", &num_rows);
#endif
ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
curr_row = row;
len = 0;
/* accumulate triples into ind/val until the row number changes */
while (ret != EOF && row <= mat->end_row)
{
if (row != curr_row)
{
/* store this row */
MatrixSetRow(mat, curr_row, len, ind, val);
curr_row = row;
/* reset row pointer */
len = 0;
}
if (len >= MAX_NZ_PER_ROW)
{
hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW);
hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n");
hypre_fprintf(stderr, "increased to continue.\n");
PARASAILS_EXIT;
}
ind[len] = col;
val[len] = value;
len++;
ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
}
/* Store the final row */
if (ret == EOF || row > mat->end_row)
MatrixSetRow(mat, mat->end_row, len, ind, val);
fclose(file);
/* make sure the last offset Isend has completed before returning */
hypre_MPI_Wait(&request, &status);
}
/*--------------------------------------------------------------------------
* MatrixReadSlave - MatrixRead routine for other processors. Internal use.
*--------------------------------------------------------------------------*/
/* MatrixRead routine for every rank except 0.  Receives from rank 0 the
 * byte offset where this rank's rows begin (sent by MatrixReadMaster),
 * fseeks there, and reads "row col value" triples into the matrix until a
 * row beyond end_row (or EOF) is seen.  Same one-row buffering scheme and
 * MAX_NZ_PER_ROW limit as the master routine. */
static void MatrixReadSlave(Matrix *mat, char *filename)
{
MPI_Comm comm = mat->comm;
hypre_MPI_Status status;
HYPRE_Int mype;
FILE *file;
HYPRE_Int ret;
HYPRE_Int row, col;
HYPRE_Real value;
hypre_longint offset;
HYPRE_Int curr_row;
HYPRE_Int len;
HYPRE_Int ind[MAX_NZ_PER_ROW];
HYPRE_Real val[MAX_NZ_PER_ROW];
HYPRE_Real time0, time1;
file = fopen(filename, "r");
hypre_assert(file != NULL);
hypre_MPI_Comm_rank(mat->comm, &mype);
/* blocking receive of this rank's starting byte offset from rank 0 */
hypre_MPI_Recv(&offset, 1, hypre_MPI_LONG, 0, 0, comm, &status);
time0 = hypre_MPI_Wtime();
ret = fseek(file, offset, SEEK_SET);
hypre_assert(ret == 0);
ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
curr_row = row;
len = 0;
/* accumulate triples into ind/val until the row number changes */
while (ret != EOF && row <= mat->end_row)
{
if (row != curr_row)
{
/* store this row */
MatrixSetRow(mat, curr_row, len, ind, val);
curr_row = row;
/* reset row pointer */
len = 0;
}
if (len >= MAX_NZ_PER_ROW)
{
hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW);
hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n");
hypre_fprintf(stderr, "increased to continue.\n");
PARASAILS_EXIT;
}
ind[len] = col;
val[len] = value;
len++;
ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
}
/* Store the final row */
if (ret == EOF || row > mat->end_row)
MatrixSetRow(mat, mat->end_row, len, ind, val);
fclose(file);
time1 = hypre_MPI_Wtime();
hypre_printf("%d: Time for slave read: %f\n", mype, time1-time0);
}
/*--------------------------------------------------------------------------
* MatrixRead - Read a matrix file "filename" from disk and store in the
* matrix "mat" which has already been created using MatrixCreate. The format
* assumes no nonzero rows, the rows are in order, and there will be at least
* one row per processor.
*--------------------------------------------------------------------------*/
/* Read the matrix file "filename" into "mat" (created by MatrixCreate).
 * Collective: rank 0 scans the whole file and hands the other ranks their
 * starting offsets; everyone then parses its own rows.  Finishes by calling
 * MatrixComplete to set up the communication machinery.  The file must be
 * row-sorted with at least one row per rank. */
void MatrixRead(Matrix *mat, char *filename)
{
    HYPRE_Int mype;
    HYPRE_Real time0, time1;

    hypre_MPI_Comm_rank(mat->comm, &mype);

    time0 = hypre_MPI_Wtime();
    if (mype != 0)
        MatrixReadSlave(mat, filename);
    else
        MatrixReadMaster(mat, filename);
    time1 = hypre_MPI_Wtime();
    hypre_printf("%d: Time for reading matrix: %f\n", mype, time1-time0);

    MatrixComplete(mat);
}
/*--------------------------------------------------------------------------
* RhsRead - Read a right-hand side file "filename" from disk and store in the
* location pointed to by "rhs". "mat" is needed to provide the partitioning
* information. The expected format is: a header line (n, nrhs) followed
* by n values. Also allows isis format, indicated by 1 HYPRE_Int in first line.
*--------------------------------------------------------------------------*/
/* Read a right-hand side from "filename" into "rhs" (pre-allocated with
 * this rank's num_local entries).  Rank 0 reads the whole file and ships
 * each other rank its slice; other ranks just post a blocking receive and
 * return.  Header line is "n nrhs"; if only one integer converts, the body
 * is assumed to be ISIS format ("index value" pairs) instead of bare values. */
void RhsRead(HYPRE_Real *rhs, Matrix *mat, char *filename)
{
FILE *file;
hypre_MPI_Status status;
HYPRE_Int mype, npes;
HYPRE_Int num_rows, num_local, pe, i, converted;
HYPRE_Real *buffer = NULL;
HYPRE_Int buflen = 0;
char line[100];
HYPRE_Int dummy;
hypre_MPI_Comm_size(mat->comm, &npes);
hypre_MPI_Comm_rank(mat->comm, &mype);
num_local = mat->end_row - mat->beg_row + 1;
/* non-root ranks: receive our slice from rank 0 and we are done */
if (mype != 0)
{
hypre_MPI_Recv(rhs, num_local, hypre_MPI_REAL, 0, 0, mat->comm, &status);
return;
}
file = fopen(filename, "r");
hypre_assert(file != NULL);
fgets(line, 100, file);
/* converted==1 means only "n" was present -> ISIS format below */
converted = hypre_sscanf(line, "%d %d", &num_rows, &dummy);
/* the file's size must match the global matrix dimension */
hypre_assert(num_rows == mat->end_rows[npes-1]);
/* Read own rows first */
for (i=0; i<num_local; i++)
if (converted == 1) /* isis format */
hypre_fscanf(file, "%*d %lf", &rhs[i]);
else
hypre_fscanf(file, "%lf", &rhs[i]);
/* then read and ship each remaining rank's slice, growing the staging
buffer only when a larger slice is encountered */
for (pe=1; pe<npes; pe++)
{
num_local = mat->end_rows[pe] - mat->beg_rows[pe]+ 1;
if (buflen < num_local)
{
free(buffer);
buflen = num_local;
buffer = hypre_TAlloc(HYPRE_Real, buflen , HYPRE_MEMORY_HOST);
}
for (i=0; i<num_local; i++)
if (converted == 1) /* isis format */
hypre_fscanf(file, "%*d %lf", &buffer[i]);
else
hypre_fscanf(file, "%lf", &buffer[i]);
hypre_MPI_Send(buffer, num_local, hypre_MPI_REAL, pe, 0, mat->comm);
}
free(buffer);
}
/*--------------------------------------------------------------------------
* SetupReceives
*--------------------------------------------------------------------------*/
/* Set up this rank's receive side of the matvec communication.
 * reqind[0..reqlen-1] is the sorted list of external (off-rank) global row
 * indices this rank needs.  For each contiguous run owned by one remote
 * rank: send that rank the index list (tag 444), record the run length in
 * outlist[this_pe], and create persistent requests — a recv (tag 555) for
 * the forward matvec and a send (tag 666) of the same buffer region for the
 * transpose matvec.  Requires mat->recv_req and mat->send_req2 to be
 * allocated already (done in MatrixComplete). */
static void SetupReceives(Matrix *mat, HYPRE_Int reqlen, HYPRE_Int *reqind, HYPRE_Int *outlist)
{
HYPRE_Int i, j, this_pe, mype;
hypre_MPI_Request request;
MPI_Comm comm = mat->comm;
HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
hypre_MPI_Comm_rank(comm, &mype);
mat->num_recv = 0;
/* Allocate recvbuf */
/* recvbuf has num_local entries reserved for the local part of x, used in matvec;
remote entries land after them at offset num_local */
mat->recvlen = reqlen; /* used for the transpose multiply */
mat->recvbuf = hypre_TAlloc(HYPRE_Real, (reqlen+num_local) , HYPRE_MEMORY_HOST);
for (i=0; i<reqlen; i=j) /* j is set below */
{
/* The processor that owns the row with index reqind[i] */
this_pe = MatrixRowPe(mat, reqind[i]);
/* Figure out other rows we need from this_pe */
for (j=i+1; j<reqlen; j++)
{
/* if row is on different pe */
if (reqind[j] < mat->beg_rows[this_pe] ||
reqind[j] > mat->end_rows[this_pe])
break;
}
/* Request rows in reqind[i..j-1] */
/* freeing the request does not cancel the send; MPI completes it in the
background and the matching Irecv is posted in SetupSends */
hypre_MPI_Isend(&reqind[i], j-i, HYPRE_MPI_INT, this_pe, 444, comm, &request);
hypre_MPI_Request_free(&request);
/* Count of the number of indices needed from this_pe */
outlist[this_pe] = j-i;
hypre_MPI_Recv_init(&mat->recvbuf[i+num_local], j-i, hypre_MPI_REAL, this_pe, 555,
comm, &mat->recv_req[mat->num_recv]);
hypre_MPI_Send_init(&mat->recvbuf[i+num_local], j-i, hypre_MPI_REAL, this_pe, 666,
comm, &mat->send_req2[mat->num_recv]);
mat->num_recv++;
}
}
/*--------------------------------------------------------------------------
* SetupSends
* This function will wait for all receives to complete.
*--------------------------------------------------------------------------*/
/* Set up this rank's send side of the matvec communication.
 * inlist[pe] is how many of our rows rank pe needs (from the Alltoall in
 * MatrixComplete).  For each such rank: receive its actual index list
 * (tag 444, matching the Isend in SetupReceives), and create persistent
 * requests — a send (tag 555) for the forward matvec and a recv (tag 666)
 * into the same buffer region for the transpose matvec.  Blocks until all
 * index lists have arrived, then converts them to local row offsets.
 * Requires mat->send_req and mat->recv_req2 to be allocated already. */
static void SetupSends(Matrix *mat, HYPRE_Int *inlist)
{
HYPRE_Int i, j, mype, npes;
hypre_MPI_Request *requests;
hypre_MPI_Status *statuses;
MPI_Comm comm = mat->comm;
hypre_MPI_Comm_rank(comm, &mype);
hypre_MPI_Comm_size(comm, &npes);
requests = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
statuses = hypre_TAlloc(hypre_MPI_Status, npes , HYPRE_MEMORY_HOST);
/* Determine size of and allocate sendbuf and sendind */
mat->sendlen = 0;
for (i=0; i<npes; i++)
mat->sendlen += inlist[i];
mat->sendbuf = NULL;
mat->sendind = NULL;
if (mat->sendlen)
{
mat->sendbuf = hypre_TAlloc(HYPRE_Real, mat->sendlen , HYPRE_MEMORY_HOST);
mat->sendind = hypre_TAlloc(HYPRE_Int, mat->sendlen , HYPRE_MEMORY_HOST);
}
j = 0;
mat->num_send = 0;
for (i=0; i<npes; i++)
{
if (inlist[i] != 0)
{
/* Post receive for the actual indices */
hypre_MPI_Irecv(&mat->sendind[j], inlist[i], HYPRE_MPI_INT, i, 444, comm,
&requests[mat->num_send]);
/* Set up the send */
hypre_MPI_Send_init(&mat->sendbuf[j], inlist[i], hypre_MPI_REAL, i, 555, comm,
&mat->send_req[mat->num_send]);
/* Set up the receive for the transpose */
hypre_MPI_Recv_init(&mat->sendbuf[j], inlist[i], hypre_MPI_REAL, i, 666, comm,
&mat->recv_req2[mat->num_send]);
mat->num_send++;
j += inlist[i];
}
}
hypre_MPI_Waitall(mat->num_send, requests, statuses);
free(requests);
free(statuses);
/* convert global indices to local indices */
/* these are all indices on this processor */
for (i=0; i<mat->sendlen; i++)
mat->sendind[i] -= mat->beg_row;
}
/*--------------------------------------------------------------------------
* MatrixComplete
*--------------------------------------------------------------------------*/
/* Finish construction of a distributed matrix after all rows are set:
 * allocate the persistent-request tables, build the Numbering (local/global
 * column map), wire up the receive and send sides (SetupReceives/
 * SetupSends, exchanging per-rank counts via Alltoall), and finally rewrite
 * every row's column indices from global to local numbering in place.
 * Collective over mat->comm; must be called before any matvec. */
void MatrixComplete(Matrix *mat)
{
HYPRE_Int mype, npes;
HYPRE_Int *outlist, *inlist;
HYPRE_Int row, len, *ind;
HYPRE_Real *val;
hypre_MPI_Comm_rank(mat->comm, &mype);
hypre_MPI_Comm_size(mat->comm, &npes);
/* npes entries each: worst case is one exchange partner per rank */
mat->recv_req = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
mat->send_req = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
mat->recv_req2 = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
mat->send_req2 = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
mat->statuses = hypre_TAlloc(hypre_MPI_Status, npes , HYPRE_MEMORY_HOST);
outlist = hypre_CTAlloc(HYPRE_Int, npes, HYPRE_MEMORY_HOST);
inlist = hypre_CTAlloc(HYPRE_Int, npes, HYPRE_MEMORY_HOST);
/* Create Numbering object */
mat->numb = NumberingCreate(mat, PARASAILS_NROWS);
/* external indices start after the num_loc local ones in local_to_global */
SetupReceives(mat, mat->numb->num_ind - mat->numb->num_loc,
&mat->numb->local_to_global[mat->numb->num_loc], outlist);
/* tell every rank how many of its rows we need, learn what it needs of ours */
hypre_MPI_Alltoall(outlist, 1, HYPRE_MPI_INT, inlist, 1, HYPRE_MPI_INT, mat->comm);
SetupSends(mat, inlist);
free(outlist);
free(inlist);
/* Convert to local indices */
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
NumberingGlobalToLocal(mat->numb, len, ind, ind);
}
}
/*--------------------------------------------------------------------------
* MatrixMatvec
* Can be done in place.
*--------------------------------------------------------------------------*/
/*
 * y = A*x using the persistent communications set up by MatrixComplete.
 * Can be done in place (y may alias x): x is fully copied into sendbuf /
 * recvbuf before any element of y is written.
 */
void MatrixMatvec(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y)
{
HYPRE_Int row, i, len, *ind;
HYPRE_Real *val, temp;
HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
/* Set up persistent communications */
/* Assumes MatrixComplete has been called */
/* Put components of x into the right outgoing buffers */
for (i=0; i<mat->sendlen; i++)
mat->sendbuf[i] = x[mat->sendind[i]];
/* Start the pre-posted persistent receives and sends. */
hypre_MPI_Startall(mat->num_recv, mat->recv_req);
hypre_MPI_Startall(mat->num_send, mat->send_req);
/* Copy local part of x into top part of recvbuf */
for (i=0; i<num_local; i++)
mat->recvbuf[i] = x[i];
/* Nonlocal x values land in recvbuf[num_local..]; local column indices
   were remapped to this layout by MatrixComplete. */
hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses);
/* do the multiply */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(row,len,ind,val,temp,i) schedule(static)
#endif
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
temp = 0.0;
for (i=0; i<len; i++)
{
temp = temp + val[i] * mat->recvbuf[ind[i]];
}
y[row] = temp;
}
/* Sends must complete before sendbuf can be reused by the next matvec. */
hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses);
}
/*
 * Identical to MatrixMatvec but with a strictly serial multiply loop
 * (no OpenMP pragma); communication pattern is unchanged.
 */
void MatrixMatvecSerial(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y)
{
HYPRE_Int row, i, len, *ind;
HYPRE_Real *val, temp;
HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
/* Set up persistent communications */
/* Assumes MatrixComplete has been called */
/* Put components of x into the right outgoing buffers */
for (i=0; i<mat->sendlen; i++)
mat->sendbuf[i] = x[mat->sendind[i]];
hypre_MPI_Startall(mat->num_recv, mat->recv_req);
hypre_MPI_Startall(mat->num_send, mat->send_req);
/* Copy local part of x into top part of recvbuf */
for (i=0; i<num_local; i++)
mat->recvbuf[i] = x[i];
hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses);
/* do the multiply */
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
temp = 0.0;
for (i=0; i<len; i++)
{
temp = temp + val[i] * mat->recvbuf[ind[i]];
}
y[row] = temp;
}
hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses);
}
/*--------------------------------------------------------------------------
* MatrixMatvecTrans
* Can be done in place.
*--------------------------------------------------------------------------*/
/*
 * y = A^T * x using the "2" request sets. The communication pattern is the
 * reverse of MatrixMatvec, so the roles are swapped on purpose:
 * recv_req2 holds num_send receive requests (posted into sendbuf by
 * SetupSends, see the Recv_init there) and send_req2 holds num_recv send
 * requests. Can be done in place.
 */
void MatrixMatvecTrans(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y)
{
HYPRE_Int row, i, len, *ind;
HYPRE_Real *val;
HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
/* Set up persistent communications */
/* Assumes MatrixComplete has been called */
/* Post receives for local parts of the solution y */
hypre_MPI_Startall(mat->num_send, mat->recv_req2);
/* initialize accumulator buffer to zero */
for (i=0; i<mat->recvlen+num_local; i++)
mat->recvbuf[i] = 0.0;
/* do the multiply */
/* Scatter: each row of A contributes to the columns it touches. */
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
for (i=0; i<len; i++)
{
mat->recvbuf[ind[i]] += val[i] * x[row];
}
}
/* Now can send nonlocal parts of solution to other procs */
hypre_MPI_Startall(mat->num_recv, mat->send_req2);
/* copy local part of solution into y */
for (i=0; i<num_local; i++)
y[i] = mat->recvbuf[i];
/* alternatively, loop over a wait any */
hypre_MPI_Waitall(mat->num_send, mat->recv_req2, mat->statuses);
/* add all the incoming partial sums to y */
for (i=0; i<mat->sendlen; i++)
y[mat->sendind[i]] += mat->sendbuf[i];
hypre_MPI_Waitall(mat->num_recv, mat->send_req2, mat->statuses);
}
|
splay.c | /*
Copyright 2007, 2008 Daniel Zerbino (zerbino@ebi.ac.uk)
This file is part of Velvet.
Velvet is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Velvet is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Velvet; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdlib.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "globals.h"
#include "recycleBin.h"
#include "kmer.h"
#include "utility.h"
/* Allocation granularity (nodes per RecycleBin chunk). */
#define CHUNKSIZE 10000
/* Shared node allocator; with OpenMP this is a per-thread bin array. */
static RecycleBin *treeMemory = NULL;
/* One splay-tree node: a kmer key plus the (seqID, position) of its first
 * recorded occurrence. Packed to save memory across millions of nodes. */
struct splayNode_st {
Kmer kmer;
Coordinate position;
struct splayNode_st *left;
struct splayNode_st *right;
IDnum seqID;
} ATTRIBUTE_PACKED;
typedef struct splayNode_st SplayNode;
typedef struct splayNode_st SplayTree;
#ifdef _OPENMP
/* OpenMP build only: lazily create one RecycleBin per thread so that
 * allocateSplayNode() can allocate without locking. The critical section
 * makes concurrent first calls safe. */
void initSplayTreeMemory(void)
{
int n;
n = omp_get_max_threads();
#pragma omp critical
if (treeMemory == NULL)
treeMemory = newRecycleBinArray(n, sizeof(SplayNode), CHUNKSIZE);
}
#endif
/* Allocate one SplayNode from the recycle-bin pool.
 * OpenMP build: uses the calling thread's private bin (lock-free) and, in
 * DEBUG mode, aborts if initSplayTreeMemory() was never called.
 * Serial build: lazily creates the single shared bin. */
static SplayNode *allocateSplayNode()
{
#ifdef _OPENMP
#ifdef DEBUG
if (treeMemory == NULL)
{
velvetLog("The memory for splay trees seems uninitialised, "
"this is probably a bug, aborting.\n");
abort();
}
#endif
return allocatePointer(getRecycleBinInArray(treeMemory,
omp_get_thread_num()));
#else
if (treeMemory == NULL)
treeMemory = newRecycleBin(sizeof(SplayNode), CHUNKSIZE);
return allocatePointer(treeMemory);
#endif
}
/* Release every node of every splay tree at once by destroying the
 * backing recycle bin(s); individual trees are never freed node by node. */
void destroyAllSplayTrees()
{
#ifdef _OPENMP
destroyRecycleBinArray(treeMemory);
#else
destroyRecycleBin(treeMemory);
#endif
/* Allow re-initialisation after teardown. */
treeMemory = NULL;
}
/* This function can be called only if K2 has a left child */
/* Perform a rotate between a node (K2) and its left child */
/* Update heights, then return new root */
/* Rotate the edge between `root` and its left child.
 * Precondition: root->left != NULL. Returns the new subtree root
 * (the former left child); `root` becomes its right child. */
static SplayNode *SingleRotateWithLeft(SplayNode * root)
{
	SplayNode *pivot = root->left;

	root->left = pivot->right;
	pivot->right = root;
	return pivot;
}
/* This function can be called only if K1 has a right child */
/* Perform a rotate between a node (K1) and its right child */
/* Update heights, then return new root */
/* Rotate the edge between `root` and its right child.
 * Precondition: root->right != NULL. Returns the new subtree root
 * (the former right child); `root` becomes its left child. */
static SplayNode *SingleRotateWithRight(SplayNode * root)
{
	SplayNode *pivot = root->right;

	root->right = pivot->left;
	pivot->left = root;
	return pivot;
}
/* Top-down splay procedure, */
/* not requiring kmer to be in tree */
/* Sleator-Tarjan top-down splay around `kmer`. On return the node whose
 * key equals kmer -- or, if kmer is absent, the last node visited -- is
 * the root. Returns NULL only for an empty tree. `Header` is a stack
 * dummy whose left/right fields collect the right/left partition trees. */
static SplayTree *Splay(Kmer * kmer, SplayTree * T)
{
SplayNode Header;
SplayNode *LeftTreeMax, *RightTreeMin;
if (T == NULL)
return NULL;
Header.left = Header.right = NULL;
LeftTreeMax = RightTreeMin = &Header;
/* compareKmers() != 0 means "not equal": descend until found or leaf. */
while (compareKmers(kmer, &(T->kmer))) {
if (compareKmers(kmer, &(T->kmer)) < 0) {
if (T->left == NULL)
break;
/* zig-zig case: rotate before linking */
if (compareKmers(kmer, &(T->left->kmer)) < 0)
T = SingleRotateWithLeft(T);
if (T->left == NULL)
break;
/* Link right */
RightTreeMin->left = T;
RightTreeMin = T;
T = T->left;
} else {
if (T->right == NULL)
break;
if (compareKmers(kmer, &(T->right->kmer)) > 0)
T = SingleRotateWithRight(T);
if (T->right == NULL)
break;
/* Link left */
LeftTreeMax->right = T;
LeftTreeMax = T;
T = T->right;
}
} /* while kmer != T->kmer */
/* Reassemble */
LeftTreeMax->right = T->left;
RightTreeMin->left = T->right;
T->left = Header.right;
T->right = Header.left;
return T;
}
/* Splay *T around X and return a pointer to the key now at the root --
 * the closest match to X; the caller must still compare it against X to
 * detect an exact hit.
 * Fix: guard the empty tree. Splay() returns NULL when *T == NULL and the
 * original unconditionally evaluated &((*T)->kmer) on that NULL pointer
 * (undefined behaviour). Returns NULL for an empty tree instead. */
Kmer * findInTree(Kmer * X, SplayTree ** T)
{
	*T = Splay(X, *T);
	if (*T == NULL)
		return NULL;
	return &((*T)->kmer);
}
/* Insert `kmer` into the splay tree rooted at *T. The tree is first
 * splayed around kmer; the new node is then placed at the root with the
 * old root as one of its children. If kmer is already present, the tree
 * is left unchanged (apart from the splay) and nothing is allocated. */
void insertIntoTree(Kmer * kmer, SplayTree ** T)
{
SplayNode *newNode;
if (*T == NULL) {
newNode = allocateSplayNode();
copyKmers(&(newNode->kmer), kmer);
newNode->left = newNode->right = NULL;
*T = newNode;
return;
}
*T = Splay(kmer, *T);
if (compareKmers(kmer, &((*T)->kmer)) < 0) {
/* kmer < root: root (and its right subtree) become the right child */
newNode = allocateSplayNode();
copyKmers(&(newNode->kmer), kmer);
newNode->left = (*T)->left;
newNode->right = *T;
(*T)->left = NULL;
*T = newNode;
} else if (compareKmers(&((*T)->kmer), kmer) < 0) {
/* kmer > root: symmetric case */
newNode = allocateSplayNode();
copyKmers(&(newNode->kmer), kmer);
newNode->right = (*T)->right;
newNode->left = *T;
(*T)->right = NULL;
*T = newNode;
}
/* equal: duplicate key, nothing to insert */
}
/* Look up `kmer`; if absent, insert it at the root recording (*seqID,
 * *position) as its first occurrence and return false. If present,
 * overwrite *seqID / *position with the stored first occurrence and
 * return true. I.e. the out-parameters are in/out: inputs on insert,
 * outputs on hit. */
boolean
findOrInsertOccurenceInSplayTree(Kmer * kmer, IDnum * seqID,
Coordinate * position, SplayTree ** T)
{
SplayNode *newNode;
if (*T == NULL) {
newNode = allocateSplayNode();
copyKmers(&(newNode->kmer), kmer);
newNode->seqID = *seqID;
newNode->position = *position;
newNode->left = newNode->right = NULL;
*T = newNode;
return false;
}
*T = Splay(kmer, *T);
if (compareKmers(kmer, &((*T)->kmer)) < 0) {
/* insert as new root, old root becomes right child */
newNode = allocateSplayNode();
copyKmers(&(newNode->kmer), kmer);
newNode->seqID = *seqID;
newNode->position = *position;
newNode->left = (*T)->left;
newNode->right = *T;
(*T)->left = NULL;
*T = newNode;
return false;
} else if (compareKmers(kmer, &((*T)->kmer)) > 0) {
/* insert as new root, old root becomes left child */
newNode = allocateSplayNode();
copyKmers(&(newNode->kmer), kmer);
newNode->seqID = *seqID;
newNode->position = *position;
newNode->right = (*T)->right;
newNode->left = *T;
(*T)->right = NULL;
*T = newNode;
return false;
} else {
/* already present: report the stored first occurrence */
*seqID = (*T)->seqID;
*position = (*T)->position;
return true;
}
}
|
TemporalReplicationPadding.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/TemporalReplicationPadding.c"
#else
/* Replication-pad one (nslices x iwidth) frame into (nslices x owidth):
 * output column j copies the input column clamped to [0, iwidth-1].
 * Negative pads crop (iStartX/oStartX handle that case). Parallel over
 * slices; each thread writes a disjoint output row. */
static void THNN_(TemporalReplicationPadding_updateOutput_frame)(
real *input_p, real *output_p,
long nslices,
long iwidth,
long owidth,
int pad_l, int pad_r)
{
/* fmax returns double; the int truncation is exact for these values */
int iStartX = fmax(0, -pad_l);
int oStartX = fmax(0, pad_l);
long k, ip_x;
#pragma omp parallel for private(k, ip_x)
for (k = 0; k < nslices; k++)
{
long j;
for (j = 0; j < owidth; j++) {
/* clamp the source column: left edge, interior, right edge */
if (j < pad_l) {
ip_x = pad_l;
} else if (j >= pad_l && j < iwidth + pad_l) {
ip_x = j;
} else {
ip_x = iwidth + pad_l - 1;
}
ip_x = ip_x - oStartX + iStartX;
real *dest_p = output_p + k*owidth + j;
real *src_p = input_p + k*iwidth + ip_x;
*dest_p = *src_p;
}
}
}
/* Forward pass: replication padding along the last (time) dimension.
 * Accepts (nslices, iwidth) or batched (nbatch, nslices, iwidth) input
 * and resizes `output` to match with owidth = iwidth + pad_l + pad_r. */
void THNN_(TemporalReplicationPadding_updateOutput)(THNNState *state,
THTensor *input,
THTensor *output,
int pad_l, int pad_r)
{
int dimw = 1;
int dimslices = 0;
long nbatch = 1;
long nslices;
long iwidth;
long owidth;
real *input_data;
real *output_data;
THNN_ARGCHECK(!input->is_empty() && (input->dim() == 2 || input->dim() == 3), 2, input,
"non-empty 2D or 3D (batch mode) tensor expected for input, but got: %s");
/* batched input: shift the slice/width dimensions right by one */
if (input->dim() == 3)
{
nbatch = input->size(0);
dimw++;
dimslices++;
}
/* sizes */
nslices = input->size(dimslices);
iwidth = input->size(dimw);
owidth = iwidth + pad_l + pad_r;
THArgCheck(owidth >= 1 , 2,
"input (W: %d)is too small."
" Calculated output W: %d",
iwidth, owidth);
/* get contiguous input */
input = THTensor_(newContiguous)(input);
/* resize output */
if (input->dim() == 2)
{
THTensor_(resize2d)(output, nslices, owidth);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
THNN_(TemporalReplicationPadding_updateOutput_frame)(input_data, output_data,
nslices,
iwidth,
owidth,
pad_l, pad_r);
}
else
{
long p;
THTensor_(resize3d)(output, nbatch, nslices, owidth);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
/* one frame per batch element; frames are independent */
#pragma omp parallel for private(p)
for (p = 0; p < nbatch; p++)
{
THNN_(TemporalReplicationPadding_updateOutput_frame)(
input_data+p*nslices*iwidth,
output_data+p*nslices*owidth,
nslices,
iwidth,
owidth,
pad_l, pad_r);
}
}
/* cleanup */
THTensor_(free)(input);
}
/* Backward of the frame padding: accumulate each output-gradient column
 * into the input-gradient column it was replicated from (edge columns
 * receive multiple contributions, hence +=; ginput must be pre-zeroed).
 * Safe to parallelize over k: all writes stay within slice k. */
static void THNN_(TemporalReplicationPadding_updateGradInput_frame)(
real *ginput_p, real *goutput_p,
long nslices,
long iwidth,
long owidth,
int pad_l, int pad_r)
{
int iStartX = fmax(0, -pad_l);
int oStartX = fmax(0, pad_l);
long k, ip_x;
#pragma omp parallel for private(k, ip_x)
for (k = 0; k < nslices; k++)
{
long j;
for (j = 0; j < owidth; j++) {
/* same clamping as the forward pass */
if (j < pad_l) {
ip_x = pad_l;
} else if (j >= pad_l && j < iwidth + pad_l) {
ip_x = j;
} else {
ip_x = iwidth + pad_l - 1;
}
ip_x = ip_x - oStartX + iStartX;
real *src_p = goutput_p + k*owidth + j;
real *dest_p = ginput_p + k*iwidth + ip_x;
*dest_p += *src_p;
}
}
}
/* Backward pass: routes gradOutput back through the replication padding.
 * gradInput is resized to input's shape and zeroed before accumulation.
 * NOTE(review): unlike updateOutput there is no dim()/emptiness check on
 * `input` here -- presumably guaranteed by the caller; confirm. */
void THNN_(TemporalReplicationPadding_updateGradInput)(THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
int pad_l, int pad_r)
{
int dimw = 1;
int dimslices = 0;
long nbatch = 1;
long nslices;
long iwidth;
long owidth;
if (input->dim() == 3)
{
nbatch = input->size(0);
dimw++;
dimslices++;
}
/* sizes */
nslices = input->size(dimslices);
iwidth = input->size(dimw);
owidth = iwidth + pad_l + pad_r;
THArgCheck(owidth == THTensor_(size)(gradOutput, dimw), 3,
"gradOutput width unexpected. Expected: %d, Got: %d",
owidth, THTensor_(size)(gradOutput, dimw));
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
/* resize */
THTensor_(resizeAs)(gradInput, input);
/* zero first: the frame kernel accumulates with += */
THTensor_(zero)(gradInput);
/* backprop */
if (input->dim() == 2) {
THNN_(TemporalReplicationPadding_updateGradInput_frame)(
THTensor_(data)(gradInput),
THTensor_(data)(gradOutput),
nslices,
iwidth,
owidth,
pad_l, pad_r);
} else {
long p;
#pragma omp parallel for private(p)
for (p = 0; p < nbatch; p++) {
THNN_(TemporalReplicationPadding_updateGradInput_frame)(
THTensor_(data)(gradInput) + p * nslices * iwidth,
THTensor_(data)(gradOutput) + p * nslices * owidth,
nslices,
iwidth,
owidth,
pad_l, pad_r);
}
}
/* cleanup */
THTensor_(free)(gradOutput);
}
#endif
|
ising.h | #ifndef _ISING_H
#define _ISING_H
#include <iostream>
#include <tuple>
#include <array>
#include <vector>
#include <bitset>
#include <algorithm>
#include <random>
#include <string>
#include <memory>
#include <limits>
#include <type_traits>
#ifdef _OPENMP
#include <valarray>
#include <omp.h>
#endif
#include "asa.h"
#include "arithmeticvector.h"
namespace ising{
// Callback adaptors handed to the GSL simulated-annealing driver
// (defined at the bottom of this header); `state` is a LatticeType*.
template<size_t N> double isingEnergy (void* state);
template<size_t N> double isingMeasure(void* stateI,
void* stateJ);
template<size_t N> void isingStep (const gsl_rng* random,
void* state,
double step);
template<size_t N> void isingPrint (void* state);
// Annealing state: the N-bit spin configuration plus shared handles to
// the RNG and the owning model (so the callbacks can reach both).
template<size_t N, class Model>
struct LatticeType{
LatticeType() = delete;
LatticeType(const LatticeType& rhs);
LatticeType& operator=(const LatticeType& rhs);
LatticeType(std::shared_ptr<std::mt19937>& _randomEngine,
std::shared_ptr<std::uniform_int_distribution
<unsigned long long int>>& _uniform,
const Model* _model,
std::bitset<N>* _nodes = nullptr);
std::shared_ptr<std::mt19937> randomEngine;
std::shared_ptr<std::uniform_int_distribution
<unsigned long long int>> uniform;
const Model* model;
// one bit per lattice site (spin down/up)
std::bitset<N> nodes;
// NOTE(review): `flipper` is never initialised or read in this header --
// confirm whether it is still needed.
size_t flipper;
};
// Polymorphic base class for all IsingModel<N> instantiations; exists only
// to give heterogeneous models a common handle type.
class AbstractIsingModel{
public:
    virtual ~AbstractIsingModel() = 0;
};
// Fix: a pure virtual destructor still requires a definition, because every
// derived destructor implicitly calls it. Without this out-of-class inline
// definition, destroying any IsingModel<N> failed to link.
inline AbstractIsingModel::~AbstractIsingModel() {}
#ifdef _OPENMP
//template<size_t N, size_t numOfThreads> // N-site lattie
template<size_t N> // N-site lattie
#else
template<size_t N> // N-site lattie
#endif
// N-site Ising model solved by GSL simulated annealing. The spin state is
// a std::bitset<N>; the Hamiltonian is a list of (site i, site j, J) pairs.
class IsingModel : public AbstractIsingModel{
public:
// QUICK_START only controls how much capacity is reserved for the
// interaction list (dense N*N/2 vs sparse N); behavior is unchanged.
IsingModel(bool QUICK_START=true):
randomDevice(),
randomEngine(std::make_shared<std::mt19937>(randomDevice())),
uniform(std::make_shared<
std::uniform_int_distribution<
unsigned long long int>>(
std::numeric_limits<
unsigned long long int>::min(),
std::numeric_limits<
unsigned long long int>::max())),
lattice(randomEngine,uniform,this){
if(QUICK_START) hamiltonian.reserve(N*N/2); // maximum number of interactions (each site with all others)
else hamiltonian.reserve(N); // minimum number of interactions (one per site)
// wire the annealing callbacks to the free template functions below
solver.set_energy ( isingEnergy<N> );
solver.set_measure( isingMeasure<N> );
solver.set_step ( isingStep<N> );
#ifdef _VERBOSE
solver.set_print ( isingPrint<N> );
#endif
#ifdef _OPENMP
// energy = std::valarray<double>(0.0,numOfThreads);
// probablity = std::valarray<double>(0.0,numOfThreads);
#endif
}
~IsingModel() override {}
// (site i, site j, coupling J)
typedef std::tuple<unsigned,unsigned,double> TwoSiteInteraction;
typedef std::vector<TwoSiteInteraction> HamiltonianType;
typedef celerium::ArithmeticVector VectorType;
protected:
std::random_device randomDevice;
std::shared_ptr<std::mt19937> randomEngine;
std::shared_ptr<std::uniform_int_distribution
<unsigned long long int>> uniform;
LatticeType<N,IsingModel> lattice;
gsl::SimulatedAnnealing solver;
HamiltonianType hamiltonian;
std::array<VectorType,3> basis;
std::vector<std::tuple<size_t,VectorType,double>> supercell;
size_t referencePoint;
// NOTE(review): `mask` is only assigned inside run(); it is read via
// get_mask() from the step callback -- confirm run() always precedes it.
const std::bitset<N>* mask;
#ifdef _OPENMP
// std::valarray<double> energy;
// std::valarray<double> probablity;
#endif
public:
typename gsl::SimulatedAnnealing::Parameters&
set_parameters(const typename gsl::SimulatedAnnealing::Parameters& _params){
return solver.set_parameters(_params);
}
void set_basis(const std::array<VectorType,3>& _basis){
basis = _basis;
}
void set_supercell(const std::vector<std::tuple<size_t,VectorType,double>>& _supercell){
supercell = _supercell;
}
void set_reference(size_t _reference){
referencePoint = _reference;
}
// Flip a random number of spins (fewer than sqrt(N)), chosen by XOR-ing
// a shuffled bit pattern; masked spins are forced back to 0 afterwards.
static unsigned randomize(std::bitset<N>& state,
std::shared_ptr<std::mt19937>& randomEngine,
std::shared_ptr<
std::uniform_int_distribution
<unsigned long long int>>& uniform,
const std::bitset<N>* mask=nullptr){
unsigned ones = (*uniform)(*randomEngine)%static_cast<size_t>(sqrt(N));
unsigned zeros = N - ones;
std::string face(ones,'1');
face += std::string(zeros,'0');
std::shuffle(face.begin(),face.end(),*randomEngine);
state ^= std::bitset<N>(face);
if(mask != nullptr) state &= *mask; // if mask exist, don't flip marked spins
return 0;
}
// Overload taking a bare engine and an explicit flip budget.
static unsigned randomize(std::bitset<N>& state,
std::mt19937& randomEngine,
size_t maxNumberOfFlips=
static_cast<size_t>(1.0+2.0*log(N)),
const std::bitset<N>* mask=nullptr){
unsigned ones = std::min(maxNumberOfFlips,N);
unsigned zeros = N - ones;
std::string face(ones,'1');
face += std::string(zeros,'0');
std::shuffle(face.begin(),face.end(),randomEngine);
state ^= std::bitset<N>(face);
if(mask != nullptr) state &= *mask; // if mask exist, don't flip marked spins
return 0;
}
const std::bitset<N>* get_mask() const{
return mask;
}
protected:
// For SFINAE compiler-time evaulation
template<class T>
T tester(T t)const{
if(std::is_integral<T>::value) return static_cast<unsigned>(t);
return t;
}
public:
// Fallback overload: rejects anything the SFINAE overloads below refuse.
void add_interaction(...){
std::cerr<<"Wrong input for auxiliary::IsingModel::add_interaction:"<<std::endl;
std::cerr<<" Either non-iterable or iterable of non <int,int,float> tuples."<<std::endl;
std::cerr<<" Nothing was added!"<<std::endl;
}
template<class intlike, class floatlike>
auto add_interaction(intlike i, intlike j, floatlike J) -> decltype((unsigned)(tester<intlike>)(i),void()){
hamiltonian.push_back(std::make_tuple(i,j,J));
}
template<class T>
auto add_interaction(T interaction) -> decltype((TwoSiteInteraction&)(tester<T>)(interaction),void()){
hamiltonian.push_back(interaction);
}
template<class Iterable>
auto add_interaction(const Iterable& interactions) -> decltype((decltype(interactions.begin()))(std::begin)(interactions),void()){
for(auto& interaction : interactions)
hamiltonian.push_back(interaction);
}
std::bitset<N>* get_nodes_ptr(){
return &(lattice.nodes);
}
void clear_hamiltonian(){
hamiltonian.clear();
}
// Drop all interactions and draw a fresh random spin configuration.
void reset(){
this->clear_hamiltonian();
this->randomize_state();
}
void randomize_state(){
IsingModel<N>::generate_state(lattice.nodes,*(lattice.randomEngine),*(lattice.uniform));
}
const std::bitset<N>& get_nodes() const{
return lattice.nodes;
}
const HamiltonianType& get_hamiltonian() const{
return hamiltonian;
}
std::bitset<N>& get_nodes(){
return lattice.nodes;
}
friend std::ostream& operator<<(std::ostream& stream, const IsingModel& model){
return stream<<model.get_nodes();
}
// E = sum_J J * s_i * s_j with bits {0,1} mapped to spins {-1/2,+1/2}.
static double energy(const LatticeType<N,IsingModel>* state){
double E = 0.0;
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp for reduction(+:E)
#endif
for(size_t i = 0U; i < state->model->get_hamiltonian().size(); ++i){
E += std::get<2>(state->model->get_hamiltonian()[i])
*(state->nodes[std::get<0>(state->model->get_hamiltonian()[i])]-0.5)
*(state->nodes[std::get<1>(state->model->get_hamiltonian()[i])]-0.5);
}
#ifdef _OPENMP
}
#endif
return E;
}
// Asymmetric distance: number of bits set in stateJ but not in stateI.
static double measure(const std::bitset<N>& stateI, const std::bitset<N>& stateJ){
std::bitset<N> output = ~stateI & stateJ;
return output.count();
}
// Anneal with no mask; returns the final spin configuration.
std::bitset<N> run(){
this->mask = nullptr;
#ifdef _VERBOSE
std::cout<<"Starting from: "<<lattice.nodes<<std::endl;
#endif
solver.run<LatticeType<N,IsingModel<N>>>(lattice,sizeof(lattice));
#ifdef _VERBOSE
std::cout<<"Solution: ";
std::cout<<lattice.nodes<<std::endl;
#endif
return lattice.nodes;
}
// Masked run: spins with a 0 mask bit are cleared and never flipped.
#ifdef _VERBOSE
std::bitset<N> run(std::bitset<N>* mask){
this->mask = mask;
lattice.nodes &= *mask;
std::cout<<"Starting from: "<<lattice.nodes<<std::endl;
solver.run<LatticeType<N,IsingModel<N>>>(lattice,sizeof(lattice));
std::cout<<"Solution: ";
std::cout<<lattice.nodes<<std::endl;
return lattice.nodes;
}
#else
std::bitset<N> run(std::bitset<N>* mask){
this->mask = mask;
lattice.nodes &= *mask;
solver.run<LatticeType<N,IsingModel<N>>>(lattice,sizeof(lattice));
return lattice.nodes;
}
#endif
// Fill all N bits with uniform randomness, 64 bits per draw.
static void generate_state(std::bitset<N>& state,
std::mt19937& engine,
std::uniform_int_distribution
<unsigned long long int>& distribution){
constexpr auto seedSize = 8*sizeof(unsigned long long int);
state = std::bitset<N>(distribution(engine));
auto currentSize = seedSize;
while (currentSize < N){
state <<= seedSize;
state |= std::bitset<N>(distribution(engine));
currentSize += seedSize;
}
}
}; // end of class IsingModel
// Annealer energy callback: forwards the type-erased state to
// IsingModel<N>::energy().
template<size_t N>
double isingEnergy (void* state){
return IsingModel<N>::energy(static_cast<LatticeType<N,IsingModel<N>>*>(state));
}
// Annealer distance callback: compares the two spin configurations via
// IsingModel<N>::measure().
template<size_t N>
double isingMeasure(void* stateI, void* stateJ){
return IsingModel<N>::measure(
static_cast<LatticeType<N,IsingModel<N>>*>(stateI)->nodes,
static_cast<LatticeType<N,IsingModel<N>>*>(stateJ)->nodes
);
}
// Annealer step callback: mutates the state by flipping random spins.
// The GSL rng and step size are unused -- the lattice carries its own
// std::mt19937 so the move is driven by that engine instead.
template<size_t N>
void isingStep (const gsl_rng* random __attribute__((unused)), void* state, double step __attribute__((unused))){
IsingModel<N>::randomize(
static_cast<LatticeType<N,IsingModel<N>>*>(state)->nodes,
static_cast<LatticeType<N,IsingModel<N>>*>(state)->randomEngine,
static_cast<LatticeType<N,IsingModel<N>>*>(state)->uniform,
static_cast<LatticeType<N,IsingModel<N>>*>(state)->model->get_mask()
);
}
// Annealer print callback: dumps the bitset state (suppressed by _QUIET).
template<size_t N>
void isingPrint (void* state){
#ifndef _QUIET
std::cout<<'\t'<<static_cast<LatticeType<N,IsingModel<N>>*>(state)->nodes;
#endif
}
// Copy constructor: shares the RNG handles (shared_ptr copies) and
// duplicates the spin configuration.
// Fix: also copy `flipper` -- the original left it uninitialized in the
// copy. NOTE(review): nothing in this header ever writes `flipper`, so
// confirm upstream whether the member is still needed at all.
template<size_t N, class Model>
LatticeType<N,Model>::LatticeType(const LatticeType<N,Model>& rhs){
    randomEngine = rhs.randomEngine;
    uniform = rhs.uniform;
    nodes = rhs.nodes;
    model = rhs.model;
    flipper = rhs.flipper;
}
// Copy assignment: same member-wise copy as the copy constructor.
// Fixes: (1) the original fell off the end of a function returning a
// reference -- undefined behaviour; add the required `return *this;`.
// (2) also copy `flipper` for consistency with full member-wise copy.
template<size_t N, class Model>
LatticeType<N,Model>& LatticeType<N,Model>::operator=(const LatticeType<N,Model>& rhs){
    randomEngine = rhs.randomEngine;
    uniform = rhs.uniform;
    nodes = rhs.nodes;
    model = rhs.model;
    flipper = rhs.flipper;
    return *this;
}
// Main constructor: adopts shared RNG handles and either copies the given
// spin configuration or, when _nodes is null, draws a random one.
// NOTE(review): `flipper` is left uninitialized here -- confirm intent.
template<size_t N, class Model>
LatticeType<N,Model>::LatticeType(
std::shared_ptr<std::mt19937>& _randomEngine,
std::shared_ptr<
std::uniform_int_distribution
<unsigned long long int>>& _uniform,
const Model* _model,
std::bitset<N>* _nodes):randomEngine(_randomEngine),
uniform(_uniform),
model(_model){
if (_nodes == nullptr)
IsingModel<N>::generate_state(nodes,*_randomEngine,*_uniform);
else
nodes = *_nodes;
}
} //end of namespace ising
#endif
|
occupancyGrid.h | /*
* Generate 3D occupancy grid from a 3D closed mesh
* by R. Falque
* 07/02/2020
*/
#ifndef OCCUPANCY_GRID_H
#define OCCUPANCY_GRID_H
#include <Eigen/Core>
#include <unsupported/Eigen/CXX11/Tensor>
#include <cmath>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include "EigenTools/getMinMax.h"
#include "EigenTools/nanoflannWrapper.h"
#include "sgn.h"
#include "IO/writePNG.h"
#include "IO/process_folder.h"
// polyscope wrapper
/*
 * Occupancy-grid voxelizer.
 *
 * Builds a boolean 3D grid from an oriented point set (vertices + outward
 * normals) sampled on a closed mesh: a voxel centre is classified as inside
 * when it lies on the inner side of its nearest sample's normal.
 */
class OccupancyGrid
{
private:
    Eigen::MatrixXd vertices_;              // mesh samples; accessed column-wise below (one point per column)
    Eigen::MatrixXd normals_;               // outward normal per sample, same indexing as vertices_
    int grid_resolution_;                   // number of cells along the longest bounding-box side
    double bounding_box_scale_;             // inflation factor applied to the bounding box
    Eigen::Tensor<bool, 3> occupancy_grid_; // true = voxel centre is inside the mesh
    double grid_size_;                      // voxel edge length (world units)
    Eigen::Vector3d source_;                // world position of voxel (0,0,0)

public:
    // Stores the inputs and immediately voxelizes (see init()).
    OccupancyGrid(Eigen::MatrixXd & vertices, Eigen::MatrixXd & normals, int grid_resolution, double bounding_box_scale)
    {
        // store variables in private variables
        vertices_ = vertices;
        normals_ = normals;
        grid_resolution_ = grid_resolution;
        bounding_box_scale_ = bounding_box_scale;
        // create the occupancy grid
        init();
    }

    // destructor
    ~OccupancyGrid()
    {
    }

    // accessors
    inline Eigen::Tensor<bool, 3> get_occupancy_grid(){return occupancy_grid_;};
    inline double get_grid_size(){return grid_size_;};
    inline Eigen::Vector3d get_source(){return source_;};

    // Create the occupancy grid: derive the voxel size from the scaled
    // bounding box and grid_resolution_, then classify every voxel centre
    // by the sign of (nearest_vertex - centre) . normal(nearest_vertex).
    void init() {
        Eigen::Vector3d min_point, max_point;
        getMinMax(vertices_, min_point, max_point);

        //double bounding_box_size = (max_point - min_point).norm() * bounding_box_scale;
        double bounding_box_size = (max_point - min_point).maxCoeff() * bounding_box_scale_; // diagonal versus max direction
        double leaf_size = bounding_box_size/(grid_resolution_-1);
        double inv_leaf_size = 1.0/leaf_size;

        // number of bins per axis, from the axis-aligned box in voxel units
        Eigen::Vector3i min_box, max_box, number_of_bins;
        min_box << floor(min_point(0)*inv_leaf_size), floor(min_point(1)*inv_leaf_size) , floor(min_point(2)*inv_leaf_size);
        max_box << floor(max_point(0)*inv_leaf_size), floor(max_point(1)*inv_leaf_size) , floor(max_point(2)*inv_leaf_size);
        number_of_bins << max_box(0) - min_box(0) + 1, max_box(1) - min_box(1) + 1, max_box(2) - min_box(2) + 1;
        occupancy_grid_.resize(number_of_bins(0), number_of_bins(1), number_of_bins(2));

        nanoflann_wrapper tree(vertices_);
        for (int x = 0; x < number_of_bins(0); ++x)
            for (int y = 0; y < number_of_bins(1); ++y)
            {
                // parallelize the innermost axis; each z writes a distinct cell
                #pragma omp parallel for
                for (int z = 0; z < number_of_bins(2); ++z)
                {
                    std::vector< int > closest_point;
                    Eigen::Vector3d point;
                    point << x, y, z;
                    point *= leaf_size;
                    point += min_point;
                    closest_point = tree.return_k_closest_points(point, 1);
                    /* produce the outer shell only remove the next line
                    if ( (point - vertices.row(closest_point[0]).transpose()).norm() < leaf_size(0)*2 )
                    grid(x, y, z) = true;
                    else
                    grid(x, y, z) = false;
                    */
                    // here is the key function: inside test via the outward normal
                    occupancy_grid_(x, y, z) = is_positive( ( vertices_.col(closest_point[0]) - point ).dot( normals_.col(closest_point[0]) ) );
                }
            }
        grid_size_ = leaf_size;
        source_ = min_point;
    }

    // Build a 6-connected graph from the occupied voxels (no guarantee of
    // connectivity). vertices: 3 x V voxel centres; edges: 2 x E index
    // pairs (each undirected edge is emitted from both endpoints, i.e. twice).
    inline bool generate_graph(Eigen::MatrixXd & vertices, Eigen::MatrixXi & edges) {
        std::vector< Eigen::Vector3d > vertices_vector;
        std::vector< Eigen::Vector2i > edges_vector;
        Eigen::Vector3d centroid;
        // grid_indices maps a voxel to its vertex index (-1 = empty voxel)
        Eigen::Tensor<int, 3> grid_indices(occupancy_grid_.dimension(0), occupancy_grid_.dimension(1), occupancy_grid_.dimension(2));
        // build vertices
        for (int x = 0; x < occupancy_grid_.dimension(0); ++x)
            for (int y = 0; y < occupancy_grid_.dimension(1); ++y)
                for (int z = 0; z < occupancy_grid_.dimension(2); ++z) {
                    grid_indices(x, y, z) = -1;
                    if (occupancy_grid_(x, y, z) == 1) {
                        centroid << x, y, z;
                        centroid *= grid_size_;
                        centroid += source_;
                        vertices_vector.push_back(centroid);
                        // fix: store the 0-based index of the vertex just pushed
                        // (the original stored size(), i.e. index + 1, which
                        // shifted every edge and overran the vertex matrix)
                        grid_indices(x, y, z) = static_cast<int>(vertices_vector.size()) - 1;
                    }
                }
        // build edges
        for (int x = 0; x < occupancy_grid_.dimension(0); ++x)
            for (int y = 0; y < occupancy_grid_.dimension(1); ++y)
                for (int z = 0; z < occupancy_grid_.dimension(2); ++z)
                    if (occupancy_grid_(x, y, z) == 1) {
                        // case on x
                        if (x-1>=0)
                            if (occupancy_grid_(x-1, y, z)==1) {
                                Eigen::Vector2i edge_temp;
                                edge_temp << grid_indices(x, y, z), grid_indices(x-1, y, z);
                                edges_vector.push_back(edge_temp);
                            }
                        if (x+1<occupancy_grid_.dimension(0))
                            if (occupancy_grid_(x+1, y, z)==1) {
                                Eigen::Vector2i edge_temp;
                                edge_temp << grid_indices(x, y, z), grid_indices(x+1, y, z);
                                edges_vector.push_back(edge_temp);
                            }
                        // case on y (fix: the original tested the z-neighbours
                        // here, which also read out of bounds at z == 0)
                        if (y-1>=0)
                            if (occupancy_grid_(x, y-1, z)==1) {
                                Eigen::Vector2i edge_temp;
                                edge_temp << grid_indices(x, y, z), grid_indices(x, y-1, z);
                                edges_vector.push_back(edge_temp);
                            }
                        if (y+1<occupancy_grid_.dimension(1))
                            if (occupancy_grid_(x, y+1, z)==1) {
                                Eigen::Vector2i edge_temp;
                                edge_temp << grid_indices(x, y, z), grid_indices(x, y+1, z);
                                edges_vector.push_back(edge_temp);
                            }
                        // case on z
                        if (z-1>=0)
                            if (occupancy_grid_(x, y, z-1)==1) {
                                Eigen::Vector2i edge_temp;
                                edge_temp << grid_indices(x, y, z), grid_indices(x, y, z-1);
                                edges_vector.push_back(edge_temp);
                            }
                        if (z+1<occupancy_grid_.dimension(2))
                            if (occupancy_grid_(x, y, z+1)==1) {
                                Eigen::Vector2i edge_temp;
                                edge_temp << grid_indices(x, y, z), grid_indices(x, y, z+1);
                                edges_vector.push_back(edge_temp);
                            }
                    }
        vertices.resize(3, vertices_vector.size());
        for (int i=0; i< vertices_vector.size(); i++)
            vertices.col(i) = vertices_vector[i];
        edges.resize(2, edges_vector.size());
        for (int i=0; i< edges_vector.size(); i++)
            edges.col(i) = edges_vector[i];
        return true;
    };

    // Write every z-slice of the grid as a zero-padded PNG ("000.png", ...)
    // into folder_name; creates the folder if missing and empties it first.
    inline bool print_to_folder(std::string folder_name) {
        bool folder_exist = does_folder_exist(folder_name);
        if (!folder_exist) {
            std::cout << "Error: the folder does not exist\n";
            create_folder(folder_name);
        }
        empty_folder(folder_name);
        #pragma omp parallel for
        for (int i=0; i<occupancy_grid_.dimension(2); i++) {
            Eigen::Matrix<bool, Eigen::Dynamic, Eigen::Dynamic> slice;
            Eigen::Tensor<bool, 2> tensor_slice;
            // NOTE(review): an extent of 0 on the sliced dimension looks
            // suspicious (1 would be conventional for a single slice) --
            // kept as-is, but worth confirming against Eigen's Tensor::slice.
            Eigen::array<long int,3> offset = {0,0,i}; //Starting point
            Eigen::array<long int,3> extent = {occupancy_grid_.dimension(0),occupancy_grid_.dimension(1),0}; //Finish point
            tensor_slice = occupancy_grid_.slice(offset, extent).reshape(Eigen::array<long int,2>{occupancy_grid_.dimension(0),occupancy_grid_.dimension(1)});
            slice = Eigen::Map<const Eigen::Matrix<bool, Eigen::Dynamic, Eigen::Dynamic>> (tensor_slice.data(), tensor_slice.dimension(0),tensor_slice.dimension(1));
            std::stringstream ss;
            ss << std::setw(3) << std::setfill('0') << i;
            std::string s = ss.str();
            std::string file_name = folder_name + s + ".png";
            writePNG(slice, file_name);
        }
        std::cout << "Progress: Stack of images written in :" << folder_name << std::endl;
        return true;
    };

    // Export the occupied voxels as a Minecraft-style structure into
    // "<filename>.yaml". The grid is recentred around the origin; the
    // declared chunk size is hard-coded to 32x32x32.
    inline bool print_to_yaml(std::string filename) {
        std::string chunk_name = filename + ".yaml";
        std::ofstream out_file(chunk_name);
        out_file << "? ''\n";
        out_file << ": - size: !list_int\n";
        out_file << " - " + std::to_string(32) + "\n";
        out_file << " - " + std::to_string(32) + "\n";
        out_file << " - " + std::to_string(32) + "\n";
        out_file << " - entities: !list_end []\n";
        out_file << " - blocks: !list_compound\n";
        for (int x = 0; x < occupancy_grid_.dimension(0); ++x)
            for (int y = 0; y < occupancy_grid_.dimension(1); ++y)
                for (int z = 0; z < occupancy_grid_.dimension(2); ++z)
                {
                    if (occupancy_grid_(x, y, z) == 1) {
                        // axis remap: grid z becomes height; y is mirrored
                        out_file << " - - pos: !list_int\n";
                        out_file << " - " + std::to_string(x-round(occupancy_grid_.dimension(0)/2)) + "\n";
                        out_file << " - " + std::to_string(z) + "\n";
                        out_file << " - " + std::to_string(-y+round(occupancy_grid_.dimension(1)/2)) + "\n";
                        out_file << " - state: 0\n";
                    }
                }
        out_file << " - author: rFalque\n";
        out_file << " - palette: !list_compound\n";
        out_file << " - - Properties:\n";
        out_file << " - variant: smooth_andesite\n";
        out_file << " - Name: minecraft:stone\n";
        out_file << " - DataVersion: 2227\n";
        out_file.close();
        return true;
    };

    // Export the occupied voxels as a triangle mesh: one 8-vertex /
    // 12-triangle cube per voxel. vertices: 3 x V; faces: 3 x F.
    inline bool generate_mesh(Eigen::MatrixXd & vertices, Eigen::MatrixXi & faces) {
        std::vector< Eigen::Vector3d > vertices_vector;
        std::vector< Eigen::Vector3i > faces_vector;
        Eigen::Vector3d centroid;
        for (int x = 0; x < occupancy_grid_.dimension(0); ++x)
            for (int y = 0; y < occupancy_grid_.dimension(1); ++y)
                for (int z = 0; z < occupancy_grid_.dimension(2); ++z)
                {
                    if (occupancy_grid_(x, y, z) == 1) {
                        centroid << x, y, z;
                        centroid *= grid_size_;
                        centroid += source_;
                        make_cube(centroid, grid_size_, vertices_vector, faces_vector);
                    }
                }
        vertices.resize(3, vertices_vector.size());
        for (int i=0; i< vertices_vector.size(); i++)
            vertices.col(i) = vertices_vector[i];
        faces.resize(3, faces_vector.size());
        for (int i=0; i< faces_vector.size(); i++)
            faces.col(i) = faces_vector[i];
        return true;
    };

    // Append the 8 vertices and 12 triangles of an axis-aligned cube
    // (centre `centroid`, edge `size`) to the given buffers; face indices
    // are offset by the number of vertices already stored.
    inline bool make_cube(Eigen::Vector3d centroid, double size, std::vector< Eigen::Vector3d > & vertices_vector, std::vector< Eigen::Vector3i > & faces_vector) {
        Eigen::Vector3i vertices_offset = Eigen::Vector3i::Constant(vertices_vector.size());
        Eigen::Vector3d v_0 ( 0.5, 0.5, 0.5);
        Eigen::Vector3d v_1 ( 0.5, 0.5,-0.5);
        Eigen::Vector3d v_2 ( 0.5,-0.5, 0.5);
        Eigen::Vector3d v_3 ( 0.5,-0.5,-0.5);
        Eigen::Vector3d v_4 (-0.5, 0.5, 0.5);
        Eigen::Vector3d v_5 (-0.5, 0.5,-0.5);
        Eigen::Vector3d v_6 (-0.5,-0.5, 0.5);
        Eigen::Vector3d v_7 (-0.5,-0.5,-0.5);
        vertices_vector.push_back( v_0*size + centroid );
        vertices_vector.push_back( v_1*size + centroid );
        vertices_vector.push_back( v_2*size + centroid );
        vertices_vector.push_back( v_3*size + centroid );
        vertices_vector.push_back( v_4*size + centroid );
        vertices_vector.push_back( v_5*size + centroid );
        vertices_vector.push_back( v_6*size + centroid );
        vertices_vector.push_back( v_7*size + centroid );
        Eigen::Vector3i f_0 (0, 3, 1);
        Eigen::Vector3i f_1 (0, 2, 3);
        Eigen::Vector3i f_2 (0, 1, 5);
        Eigen::Vector3i f_3 (0, 5, 4);
        Eigen::Vector3i f_4 (4, 5, 7);
        Eigen::Vector3i f_5 (4, 7, 6);
        Eigen::Vector3i f_6 (2, 6, 7);
        Eigen::Vector3i f_7 (2, 7, 3);
        Eigen::Vector3i f_8 (1, 3, 7);
        Eigen::Vector3i f_9 (1, 7, 5);
        Eigen::Vector3i f_10(0, 4, 2);
        Eigen::Vector3i f_11(2, 4, 6);
        faces_vector.push_back(f_0 + vertices_offset);
        faces_vector.push_back(f_1 + vertices_offset);
        faces_vector.push_back(f_2 + vertices_offset);
        faces_vector.push_back(f_3 + vertices_offset);
        faces_vector.push_back(f_4 + vertices_offset);
        faces_vector.push_back(f_5 + vertices_offset);
        faces_vector.push_back(f_6 + vertices_offset);
        faces_vector.push_back(f_7 + vertices_offset);
        faces_vector.push_back(f_8 + vertices_offset);
        faces_vector.push_back(f_9 + vertices_offset);
        faces_vector.push_back(f_10 + vertices_offset);
        faces_vector.push_back(f_11 + vertices_offset);
        return true;
    };
};
#endif |
EmbeddingBag.h | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Dhiraj Kalamkar, Evangelos Georganas (Intel Corp.)
******************************************************************************/
#define JIT_REDUCE_COLS_IDX
#ifdef JIT_REDUCE_COLS_IDX
#include <libxsmm.h>
#endif
#include "utils.h"
#include "rtm.h"
template <typename T>
class EmbeddingBagImpl
{
public:
// Embedding-bag (sum pooling) lookup table: M rows of E features each,
// stored in one aligned buffer owned by this object.
EmbeddingBagImpl(int M, int E) : M(M), E(E)
{
weight_ = (T*)my_malloc((size_t)M * E * sizeof(T), alignment);
}
~EmbeddingBagImpl()
{
my_free(weight_);
weight_ = 0;
}
// Fill the whole table with uniform random values in [low, high).
void init(T low = -0.1, T high = 0.1)
{
init_random(M * E, weight_, low, high);
}
#ifdef JIT_REDUCE_COLS_IDX
// forward: for each of the N bags, sum the weight_ rows selected by
// indices[offsets[n] .. next_offset) into output_[n*E .. n*E+E).
// NS is the total number of indices; it terminates the last bag.
// This variant offloads the indexed row reduction to a LIBXSMM JIT kernel
// (dispatched once, invoked per bag inside the parallel loop).
void forward(int N, int NS, const long *offsets, const long *indices, T *output_)
{
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict output)[E] = (T(*)[*])output_;
libxsmm_meltwfunction_reduce_cols_idx kernel;
int _ld = E;
kernel = libxsmm_dispatch_meltw_reduce_cols_idx(E, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, (sizeof(long) == 8) ? LIBXSMM_DATATYPE_I64 : LIBXSMM_DATATYPE_I32) ;
#pragma omp parallel for
for (int n = 0; n < N; n++)
{
libxsmm_meltw_reduce_cols_idx_param params;
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
params.n = end - start;
params.ind_ptr = &indices[start];
params.inp_ptr = weight;
params.out_ptr = &output[n][0];
/* BUGFIX: was "kernel( ¶ms );" — mojibake for "&params" (the HTML
   entity "&para;" got decoded); it does not compile. Pass the address
   of the parameter struct to the JIT kernel. */
kernel( &params );
}
}
#else
// forward (reference path): plain OpenMP/SIMD sum of the selected rows;
// semantics identical to the JIT variant above.
void forward(int N, int NS, const long *offsets, const long *indices, T *output_)
{
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict output)[E] = (T(*)[*])output_;
#pragma omp parallel for
for (int n = 0; n < N; n++)
{
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
#pragma omp simd
for (long v = 0; v < E; v++)
output[n][v] = 0;
for (long s = start; s < end; s++)
{
auto ind = indices[s];
#pragma omp simd
for (long v = 0; v < E; v++)
{
output[n][v] += weight[ind][v];
}
}
}
}
#endif
// backward: scatter each bag's output gradient to every index slot that
// contributed to it (d(sum)/d(row) == 1, so each selected slot just copies
// the bag gradient). values_ receives one E-wide gradient row per index.
void backward(int N, int NS, const T *gradout_, const long *offsets, const long *indices, T *values_)
{
T(*__restrict gradout)[E] = (T(*)[*])gradout_;
T(*__restrict values)[E] = (T(*)[*])values_;
#pragma omp parallel for
for (int n = 0; n < N; n++)
{
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
for (long s = start; s < end; s++)
{
#pragma omp simd
#ifdef STREAMING_WRITES
#pragma vector nontemporal(values)
#endif
for (long v = 0; v < E; v++)
values[s][v] = gradout[n][v];
}
}
}
// update: apply the per-index gradient rows produced by backward() to the
// weight table. Note the "+=": descent direction is presumably encoded in
// the sign of lr or grads_ by the caller — TODO confirm.
void update(int NS, const T *grads_, const long *indices, float lr)
{
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict grads)[E] = (T(*)[*])grads_;
SimpleSpinLock fallBackLock;
#pragma omp parallel for
for (long i = 0; i < NS; i++)
{
long ind = indices[i];
{
// Duplicate indices may target the same row concurrently; the
// TransactionScope (see rtm.h) appears to guard the row update with a
// transaction that falls back to the spinlock — NOTE(review): verify.
TransactionScope guard(fallBackLock, 100, 0);
#pragma omp simd
for (long v = 0; v < E; v++)
weight[ind][v] += lr * grads[i][v];
}
}
}
T *weight_;   // M x E table, row-major, aligned via my_malloc
int M;        // number of rows (embedding entries)
int E;        // features per row
};
typedef EmbeddingBagImpl<FTyp> EmbeddingBag;
|
pr66199-5.c | /* PR middle-end/66199 */
/* { dg-do run } */
#pragma omp declare target
int u[1024], v[1024], w[1024];
#pragma omp end declare target
/* PR middle-end/66199 regression check: d is both the loop iterator of the
   combined distribute-parallel-for-simd construct and a map(from:) variable,
   so its final value (== b when a < b) must be copied back to the host.  */
__attribute__((noinline, noclone)) long
f1 (long a, long b)
{
long d;
#pragma omp target map(from: d)
#pragma omp teams distribute parallel for simd default(none) firstprivate (a, b) shared(u, v, w)
for (d = a; d < b; d++)
u[d] = v[d] + w[d];
/* Caller (main) expects d == b here.  */
return d;
}
/* Like f1, but the iterator d is explicitly linear() and e is lastprivate:
   after the offloaded loop, d must equal b and e must hold the value
   computed in the last iteration, c + (b - 1) * 5.  */
__attribute__((noinline, noclone)) long
f2 (long a, long b, long c)
{
long d, e;
#pragma omp target map(from: d, e)
#pragma omp teams distribute parallel for simd default(none) firstprivate (a, b, c) shared(u, v, w) linear(d) lastprivate(e)
for (d = a; d < b; d++)
{
u[d] = v[d] + w[d];
e = c + d * 5;
}
/* Caller (main) expects b + (c + (b - 1) * 5).  */
return d + e;
}
/* collapse(2) variant with explicit lastprivate on both collapsed iterators:
   after the loop nest, d1 == b1 and d2 == b2 must be mapped back.  */
__attribute__((noinline, noclone)) long
f3 (long a1, long b1, long a2, long b2)
{
long d1, d2;
#pragma omp target map(from: d1, d2)
#pragma omp teams distribute parallel for simd default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) lastprivate(d1, d2) collapse(2)
for (d1 = a1; d1 < b1; d1++)
for (d2 = a2; d2 < b2; d2++)
u[d1 * 32 + d2] = v[d1 * 32 + d2] + w[d1 * 32 + d2];
/* Caller (main) expects b1 + b2.  */
return d1 + d2;
}
/* Same loop nest as f3 but without an explicit lastprivate clause: the test
   checks that the implicit data-sharing of the collapsed iterators still
   yields d1 == b1 and d2 == b2 through map(from:).  */
__attribute__((noinline, noclone)) long
f4 (long a1, long b1, long a2, long b2)
{
long d1, d2;
#pragma omp target map(from: d1, d2)
#pragma omp teams distribute parallel for simd default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) collapse(2)
for (d1 = a1; d1 < b1; d1++)
for (d2 = a2; d2 < b2; d2++)
u[d1 * 32 + d2] = v[d1 * 32 + d2] + w[d1 * 32 + d2];
/* Caller (main) expects b1 + b2.  */
return d1 + d2;
}
int
main ()
{
/* Each loop runs to its upper bound, so the returned iterator equals b
   (or b1 + b2 for the collapsed nests); f2 additionally returns the
   lastprivate e from the final iteration d == 1023.  Any mismatch means
   the iterator/lastprivate value was not copied back correctly.  */
if (f1 (0, 1024) != 1024
|| f2 (0, 1024, 17) != 1024 + (17 + 5 * 1023)
|| f3 (0, 32, 0, 32) != 64
|| f4 (0, 32, 0, 32) != 64)
__builtin_abort ();
return 0;
}
|
ast-dump-openmp-teams-distribute.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { /* minimal case: one canonical loop under target + teams distribute */
#pragma omp target
#pragma omp teams distribute
for (int i = 0; i < x; i++) /* NOTE: the CHECK lines below encode exact line/col positions; do not add or remove lines in these functions */
;
}
void test_two(int x, int y) { /* nested loop, no collapse: only the outer loop is associated with the directive */
#pragma omp target
#pragma omp teams distribute
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++) /* inner i deliberately shadows outer i */
;
}
void test_three(int x, int y) { /* explicit collapse(1): should dump the same association as no collapse clause */
#pragma omp target
#pragma omp teams distribute collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++) /* inner i deliberately shadows outer i */
;
}
void test_four(int x, int y) { /* collapse(2): both loops associated with the directive */
#pragma omp target
#pragma omp teams distribute collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++) /* inner i deliberately shadows outer i */
;
}
void test_five(int x, int y, int z) { /* collapse(2) with a third, non-associated innermost loop */
#pragma omp target
#pragma omp teams distribute collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++) /* this loop stays outside the collapsed nest */
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-teams-distribute.c:3:1, line:8:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:8:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:6:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:1, col:29>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:29>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeDirective {{.*}} <col:1, col:29>
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:5:1) *const restrict'
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeDirective {{.*}} <line:5:1, col:29>
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:6:3, line:7:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:5:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:3> col:3 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <col:3, line:7:5>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:6:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:5:1) *const restrict'
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:10:1, line:16:1> line:10:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:16:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:11:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:13:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:12:1, col:29>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:29>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeDirective {{.*}} <col:1, col:29>
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:11:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:12:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:11:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeDirective {{.*}} <line:12:1, col:29>
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:12:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:13:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:11:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:13:3, line:15:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:13:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:14:5, line:15:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:14:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:15:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:12:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:18:1, line:24:1> line:18:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:24:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:19:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:21:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:20:1, col:41>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:41>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeDirective {{.*}} <col:1, col:41>
// CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:30, col:40>
// CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:39> 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:39> 'int' 1
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:19:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:20:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:19:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeDirective {{.*}} <line:20:1, col:41>
// CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:30, col:40>
// CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:39> 'int'
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:39> 'int' 1
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:20:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:21:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:19:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:21:3, line:23:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:21:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:22:5, line:23:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:22:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:23:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:20:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:26:1, line:32:1> line:26:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:32:1>
// CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:27:1, col:19>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:29:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:30:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:28:1, col:41>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:41>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-OMPTeamsDistributeDirective {{.*}} <col:1, col:41>
// CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:30, col:40>
// CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:39> 'int'
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:39> 'int' 2
// CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:27:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:5> col:5 implicit 'int &'
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:28:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:27:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-OMPTeamsDistributeDirective {{.*}} <line:28:1, col:41>
// CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:30, col:40>
// CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:39> 'int'
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:39> 'int' 2
// CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:28:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:27:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:5> col:5 implicit 'int &'
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:29:3, line:31:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:29:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:30:5, line:31:7>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:30:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:31:7>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:28:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:29:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:30:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:34:1, line:41:1> line:34:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:41:1>
// CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:35:1, col:19>
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:37:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:38:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:36:1, col:41>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <col:1, col:41>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-OMPTeamsDistributeDirective {{.*}} <col:1, col:41>
// CHECK-NEXT: | | | | |-OMPCollapseClause {{.*}} <col:30, col:40>
// CHECK-NEXT: | | | | | `-ConstantExpr {{.*}} <col:39> 'int'
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:39> 'int' 2
// CHECK-NEXT: | | | | `-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:36:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:35:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:3> col:3 implicit 'int &'
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:5> col:5 implicit 'int &'
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:36:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | | `-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-'
// CHECK-NEXT: | | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:35:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-OMPTeamsDistributeDirective {{.*}} <line:36:1, col:41>
// CHECK-NEXT: | | |-OMPCollapseClause {{.*}} <col:30, col:40>
// CHECK-NEXT: | | | `-ConstantExpr {{.*}} <col:39> 'int'
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:39> 'int' 2
// CHECK-NEXT: | | `-CapturedStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:36:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:35:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:3> col:3 implicit 'int &'
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:5> col:5 implicit 'int &'
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &'
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:37:3, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:37:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:38:5, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:38:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:39:7, line:40:9>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:39:12, col:21>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:40:9>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute.c:36:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long'
// CHECK-NEXT: | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-'
// CHECK-NEXT: | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*'
// CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast>
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/'
// CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int'
// CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-'
// CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast>
// CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/'
// CHECK-NEXT: | | |-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int'
// CHECK-NEXT: | | | `-ParenExpr {{.*}} <col:5> 'int'
// CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+'
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-'
// CHECK-NEXT: | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:28> 'int' 1
// CHECK-NEXT: | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast>
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:37:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:38:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
Graph.h | /**
*@file Graph.h
*@author Sven Fleischer
*There I've coded everything, which has to do anything with the graph
*/
#ifndef GRAPH_H
#define GRAPH_H
#include "dataStruct.h"
#define MAX_LINE_LEN 512
/**
* The graph data structure, with the amount of nodes, the amount of edges per node and a structure, which represents the graph.
*/
struct Graphs{
size_t graphSize;//number of nodes in the graph
size_t* vertexNo;//per node: number of incident edges (length of that node's adjacency row, in pairs)
size_t edgeNo;//total number of edges read from the input file
unsigned int** graph;//an array of every node length, where every node is stored, with the cost, with which it is connected
// NOTE(review): each row graph[i] stores flat (neighbour, weight) pairs, i.e.
// graph[i][2*k] is the 1-based neighbour id and graph[i][2*k+1] the edge weight
// (see buildGraph below) -- confirm before changing the layout.
};
/**
* The steiner tree struct, with an array with all the edges, the weight of the tree and the number of edges.
*/
struct SteinerTree{
double value;//objective value (sum of the weights of all tree edges; INFINITY marks "pruned/invalid")
long int* tree;//store here always the precessor. Is enough for a tree
// tree[i] == -1 means node i is not (yet) part of the tree (see getSteinerTree).
};
/**
* varForDijkstra data structure, which has the distance array and a marker to the first terminal reached.
*/
struct varForDijkstra{
double* dist;//distance vector (INFINITY = unreached)
size_t firstNewTerminal;//first reached terminal
long int* prev;//array, where the path is saved+the cost every edge adds
// prev is laid out as pairs: prev[2*i] = predecessor of i (-1 = source, -2 = none),
// prev[2*i+1] = weight of the edge used to reach i.
char* visited;// 1 once a node has been settled by dijkstra()
struct BinaryHeap heap;// min-heap over dist, declared in dataStruct.h
};
/**
* Parses the graph from a gph file.
*
* @param The input file.
*@return The graph.
*/
struct Graphs buildGraph(FILE *fp){
    struct Graphs gra;
    char *tmp;                 /* cursor advanced by strtol while cutting the line */
    char *line = malloc(MAX_LINE_LEN * sizeof(char));
    int from = 0;
    int to = 0;
    long int weightOfEdge = 0;
    size_t i = 0;
    size_t len = 0;            /* buffer capacity tracked by getline */
    /* First line: "<number of nodes> <number of edges>".
       The original ignored a failed read and parsed a stale buffer; return an
       empty graph instead. */
    if (getline(&line, &len, fp) == -1)
    {
        gra.graphSize = 0;
        gra.edgeNo = 0;
        gra.vertexNo = NULL;
        gra.graph = NULL;
        free(line);
        return gra;
    }
    gra.graphSize = strtol(line, &tmp, 10);
    gra.edgeNo = strtol(tmp, &tmp, 10);
    gra.vertexNo = malloc(gra.graphSize*sizeof(size_t));
    gra.graph = malloc(gra.graphSize*sizeof(unsigned int*));
    assert(gra.vertexNo && gra.graph);
    /* Start every adjacency row with a tiny allocation so realloc below works.
       (The original wrapped this loop in an orphaned "#pragma omp for" outside
       any parallel region, which executed sequentially anyway -- removed.) */
    for (i = 0; i < gra.graphSize; ++i)
    {
        gra.graph[i] = malloc(sizeof(size_t*));
        gra.vertexNo[i] = 0;
    }
    /* Remaining lines: "<from> <to> <weight>" with 1-based node ids. */
    while ((getline(&line, &len, fp)) != -1)
    {
        if (*(line) != '\n'){
            from = strtol(line, &tmp, 10);
            to = strtol(tmp, &tmp, 10);
            weightOfEdge = strtol(tmp, &tmp, 10);
            /* Ids are used as from-1/to-1 below, so 0 or out-of-range ids would
               corrupt memory; the original only checked >= 0. */
            assert(from >= 1 && (size_t)from <= gra.graphSize);
            assert(to >= 1 && (size_t)to <= gra.graphSize);
            assert(weightOfEdge >= 0);
            /* adding one way: rows hold flat (neighbour, weight) pairs */
            gra.graph[from-1] = realloc(gra.graph[from-1], (gra.vertexNo[from-1]+1) * sizeof(size_t)*2);
            assert(gra.graph[from-1]);
            gra.graph[from-1][gra.vertexNo[from-1]*2 +0] = to;
            gra.graph[from-1][gra.vertexNo[from-1]*2+1] = weightOfEdge;
            ++gra.vertexNo[from-1];
            /* adding the other way: the graph is undirected */
            gra.graph[to-1] = realloc(gra.graph[to-1], (gra.vertexNo[to-1]+1) * sizeof(size_t)*2);
            assert(gra.graph[to-1]);
            gra.graph[to-1][gra.vertexNo[to-1]*2+0] = from;
            gra.graph[to-1][gra.vertexNo[to-1]*2+1] = weightOfEdge;
            ++gra.vertexNo[to-1];
        }
    }
    free(line);   /* getline's buffer was leaked in the original */
    printf("Graph read\n");
    return gra;
}
/**
* Computes the shortest paths from nodes already in the steiner tree to the next terminal, and saves the path, too.
* @param The graph, and the Terminals, the variables for Dijkstra and the nodes already in the tree
* @return The shortest path from the current nodes in the tree to the closest steiner tree, with the path and the weight of it
*
*/
struct varForDijkstra dijkstra(struct Graphs gra, size_t* isTerminal, struct varForDijkstra dij, char* inTree){
size_t minVal;
//the dijkstra algo
//this should run till it reaches the first terminal, therefore breakup condition written into the loop
while(1)
{
//getting currently cheapest connected node (heap root)
minVal = dij.heap.positionInHeap[0];
//breakup condition if terminal is reached
// NOTE(review): isTerminal is indexed at minVal+1, i.e. it appears to be
// 1-based, and minVal == 1 is treated as a terminal too -- confirm against
// the caller before changing.
if(!inTree[minVal] && (isTerminal[minVal+1] || minVal == 1))
{
dij.firstNewTerminal = minVal;
return dij;
}
//set it visited and reset lastFirst 0
dij.heap.lastFirst = 0;
dij.visited[minVal] = 1;
//relax all edges of minVal: update if a new shorter way is found
// (neighbour ids in gra.graph are 1-based, hence the -1 everywhere)
for (size_t n= 0; n < gra.vertexNo[minVal]; n++){
if(!dij.visited[gra.graph[minVal][2*n + 0]-1] && dij.dist[minVal] != INFINITY && dij.dist[minVal] + (double)gra.graph[minVal][2*n+1] < dij.dist[gra.graph[minVal][2*n+0]-1])
{
dij.dist[gra.graph[minVal][2*n+0]-1] = dij.dist[minVal] + (double)gra.graph[minVal][2*n+1];
assert(!fetestexcept(FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW));
//remember predecessor and the weight of the edge used to reach the neighbour
dij.prev[2*(gra.graph[minVal][2*n +0]-1)] = minVal;
dij.prev[2*(gra.graph[minVal][2*n +0]-1)+1] = gra.graph[minVal][2*n+1];
dij.heap.heapVal[gra.graph[minVal][2*n+0]-1] = dij.dist[gra.graph[minVal][2*n + 0]-1];
//update heap (decrease-key via the reversed index)
dij.heap = update(dij.heap.reversedIndex[gra.graph[minVal][2*n+0]-1], dij.heap);
}
}
//remove root element, by setting it to infinity and pushing it down in the heap
dij.heap = makeFirstIrrelevant(dij.heap, gra.graphSize);
}
//unreachable: the loop only exits through the return above
return dij;
}
/**
* Finds a steiner tree.
*
* @param All the terminal nodes, the number of terminal nodes, the current start terminal, the graph, and an array which says if this current node is a terminal, and an upper bound, if this is violated, the function stops
* @return The steiner tree with the objective value
*/
struct SteinerTree getSteinerTree(size_t* terminal, size_t numberOfTerminal, size_t startTerminal, struct Graphs gra, size_t* isTerminal, double bestStein){
//setting intermal variables for the steiner tree, if this node is already in the tree
char* inTree = malloc(gra.graphSize*sizeof(char));
size_t counter = 2;
//declaring tree
struct SteinerTree stein;
stein.tree = malloc(sizeof(stein.tree)*gra.graphSize);
//setting objective value 0 at the beginning
stein.value = 0;
//fitting everything for dijkstra, so heap indexholder, previous node and visited
struct varForDijkstra dij;
dij.dist = malloc(sizeof(double)*gra.graphSize);
dij.prev = malloc(sizeof(long int)*gra.graphSize*2);
dij.visited = malloc(sizeof(char)*gra.graphSize);
dij.heap.heapVal = malloc(sizeof(double)*gra.graphSize);
dij.heap.positionInHeap = malloc(sizeof(dij.heap.positionInHeap)*gra.graphSize);
dij.heap.reversedIndex = malloc(sizeof(dij.heap.reversedIndex)*gra.graphSize);
dij.firstNewTerminal = 0;
//fitting the necesary vectors
for (size_t i = 0; i < gra.graphSize; ++i)
{
stein.tree[i] = -1;
dij.visited[i] = 0;
dij.heap.heapVal[i] = INFINITY;
dij.heap.positionInHeap[i] = i;
dij.heap.reversedIndex[i] = i;
inTree[i] = 0;
dij.dist[i] = INFINITY;
dij.heap.heapVal[i] = INFINITY;
dij.prev[2*i] = -2;
dij.prev[2*i+1] = 0;
}
//mark for dijkstra the source
dij.heap.heapVal[terminal[startTerminal]] = 0;
dij.dist[terminal[startTerminal]] = 0;
dij.prev[2*terminal[startTerminal]] = -1;
dij.prev[2*terminal[startTerminal]+1] = 0;
//build the heap
dij.heap = buildMinHeap(dij.heap, gra.graphSize);
//show that the start terminal is in the tree
inTree[terminal[startTerminal]] = 1;
//calculate shortest way till first other terminal
dij = dijkstra(gra, isTerminal, dij, inTree);
//setting iterator, which node is added to the tree
long int iter = dij.firstNewTerminal;
size_t overload = iter;
//adding a path to the new terminal to the tree, and update everything precisely for the dijkstra, that it can start there, where it ended
while(dij.prev[2*iter] != -1)
{
inTree[iter] = 1;
overload = iter;
stein.value += dij.prev[2*iter+1];
assert(!fetestexcept(FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW));
//check for break up condition
if(bestStein <= stein.value)
{
stein.value = INFINITY;
free(stein.tree);
free(dij.heap.positionInHeap);
free(dij.heap.reversedIndex);
free(dij.visited);
free(dij.dist);
free(dij.heap.heapVal);
free(inTree);
return stein;
}
dij.heap.heapVal[iter] = 0;
dij.dist[iter] = 0;
dij.visited[iter] = 0;
dij.heap = update(dij.heap.reversedIndex[iter], dij.heap);
stein.tree[iter] = dij.prev[2*iter];
//making the next step
iter = dij.prev[2*iter];
dij.prev[2*overload] = -1;
}
//each step a terminal is added, so repeat is so often till we should have enough termianls
for( ;counter < numberOfTerminal; ++counter)
{
dij = dijkstra(gra, isTerminal, dij, inTree);
//setting new iterator
iter = dij.firstNewTerminal;
overload = iter;
//adding a path to the new terminal to the tree, and setting their(on the path laying) nodes to 0 for dijkstra
while(dij.prev[2*iter] != -1)
{
inTree[iter] = 1;
overload = iter;
stein.value += dij.prev[2*iter+1];
assert(!fetestexcept(FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW));
//check for break up condition
if(bestStein <= stein.value)
{
stein.value = INFINITY;
free(stein.tree);
free(dij.heap.positionInHeap);
free(dij.heap.reversedIndex);
free(dij.visited);
free(dij.dist);
free(dij.heap.heapVal);
free(inTree);
return stein;
}
dij.heap.heapVal[iter] = 0;
dij.dist[iter] = 0;
dij.visited[iter] = 0;
dij.heap = update(dij.heap.reversedIndex[iter], dij.heap);
assert(stein.tree[iter] == -1);//this has to hold for a tree
stein.tree[iter] = dij.prev[2*iter];
iter = dij.prev[2*iter];
dij.prev[2*overload] = -1;
}
}
//check if tree is still smaller, if delete it so save memory
if(bestStein <= stein.value)
{
stein.value = INFINITY;
free(stein.tree);
free(dij.heap.positionInHeap);
free(dij.heap.reversedIndex);
free(dij.visited);
free(dij.dist);
free(dij.heap.heapVal);
free(inTree);
return stein;
}
//free everything
free(dij.heap.positionInHeap);
free(dij.heap.reversedIndex);
free(dij.visited);
free(dij.dist);
free(dij.heap.heapVal);
free(inTree);
return stein;
}
/**
* Checks if the graph is a tree.
*
* @param A graph, the start node and the char array, where the visited nodes are marked
* @return The char array with the visited nodes. If at the first position is a f, then it is not a tree and cycle was found
*/
/* Iterative depth-first search used as a tree/cycle check.
   Marks every reachable node in visited; if an already-visited node is seen
   again via a non-parent edge (i.e. a cycle exists), visited[0] is set to 'f'.
   Returns the visited array. */
char* dfs(struct Graphs gra, size_t start, char* visited){
    struct Stack stk;
    stk.data = malloc(sizeof(unsigned int)*gra.graphSize);
    stk.parents = malloc(sizeof(unsigned int)*gra.graphSize);
    stk.topElement = 0;
    stk.maxSize = gra.graphSize +1;
    stk = push(start, start,stk);
    size_t next;
    size_t parent;
    /* as long as the stack is not empty */
    while(stk.topElement != 0)
    {
        next = stk.data[stk.topElement];
        parent = stk.parents[stk.topElement];
        stk = pop(stk);
        visited[next] = 1;
        for(size_t i = 0; i < gra.vertexNo[next]; ++i)
        {
            /* skip the edge back to where we came from */
            if(gra.graph[next][i] != parent)
            {
                if(!visited[gra.graph[next][i]])
                {
                    assert(next < gra.graphSize);
                    stk = push(gra.graph[next][i], next,stk);
                }
                else
                {
                    /* reached a visited node on another path: cycle found */
                    visited[0] = 'f';
                    free(stk.data);      /* stack buffers were leaked in the original */
                    free(stk.parents);
                    return visited;
                }
            }
        }
    }
    free(stk.data);      /* stack buffers were leaked in the original */
    free(stk.parents);
    return visited;
}
/**
* Checks for the correctness of the solution
*
* @param The supposetely steiner tree, the graph, the terminal set and the amount of terminals
* @return 1 if the solution is a steiner tree and 0 otherwise
*/
/* Checks whether stein is a valid Steiner tree for gra: the selected edges
   must form a cycle-free connected structure that covers every terminal.
   WARNING: destructively rebuilds gra's adjacency lists as the candidate
   tree (the caller's original graph is overwritten).
   Returns 1 if the solution is a Steiner tree and 0 otherwise. */
char solChecker(struct SteinerTree stein, struct Graphs gra, size_t* terminal, size_t noOfTerminal, int numThreads){
    omp_set_num_threads(numThreads);
    char* visited = malloc(sizeof(char)*gra.graphSize);
    char ok = 1;
    /* reset the graph: drop the original edges */
    for(size_t i = 0; i < gra.graphSize; ++i)
    {
        free(gra.graph[i]);
        gra.vertexNo[i] = 0;
        gra.graph[i] = malloc(sizeof(unsigned int));
        visited[i] = 0;
    }
    /* rebuild gra as the candidate tree (both directions per edge) */
    for(size_t i = 0; i < gra.graphSize; ++i){
        if(stein.tree[i] !=-1)
        {
            gra.graph[i] = realloc(gra.graph[i], sizeof(unsigned int)*(gra.vertexNo[i]+1));
            gra.graph[i][gra.vertexNo[i]] = stein.tree[i];
            ++gra.vertexNo[i];
            gra.graph[stein.tree[i]] = realloc(gra.graph[stein.tree[i]], sizeof(unsigned int)*(gra.vertexNo[stein.tree[i]]+1));
            gra.graph[stein.tree[i]][gra.vertexNo[stein.tree[i]]] = i;
            ++gra.vertexNo[stein.tree[i]];
        }
    }
    /* dfs marks every reachable node, and visited[0] == 'f' signals a cycle */
    /* NOTE(review): the search starts at node 1, not at terminal[0] -- confirm
       that node 1 is guaranteed to be in the tree. */
    visited = dfs(gra, 1, visited);
    if(visited[0] == 'f')
    {
        ok = 0;
    }
    else
    {
        /* every terminal has to be reached */
        for(size_t i = 0; i < noOfTerminal; ++i)
        {
            if(!visited[terminal[i]])
            {
                ok = 0;
                break;
            }
        }
    }
    free(visited);   /* was leaked on every return path in the original */
    return ok;
}
#endif
|
GB_unop__identity_uint16_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_uint8)
// op(A') function: GB (_unop_tran__identity_uint16_uint8)
// C type: uint16_t
// A type: uint8_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies cij = (uint16_t) aij to each of the anz entries of A, writing into
// Cx, using nthreads OpenMP threads with a static schedule.  Cx and Ax may
// alias because each entry is read before it is written.
GrB_Info GB (_unop_apply__identity_uint16_uint8)
(
uint16_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// dense/sparse case: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with no typecast: a parallel memcpy suffices
GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
uint16_t z = (uint16_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
// only entries with Ab [p] nonzero are present and converted
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
uint16_t z = (uint16_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose-and-apply kernel.  The actual loop body
// lives in the shared template GB_unop_transpose.c, which is specialised here
// by the GB_* macros defined at the top of this (auto-generated) file.
GrB_Info GB (_unop_tran__identity_uint16_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
c-parser.c | /* Parser for C and Objective-C.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Parser actions based on the old Bison parser; structure somewhat
influenced by and fragments based on the C++ parser.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
/* TODO:
Make sure all relevant comments, and all relevant code from all
actions, brought over from old parser. Verify exact correspondence
of syntax accepted.
Add testcases covering every input symbol in every state in old and
new parsers.
Include full syntax for GNU C, including erroneous cases accepted
with error messages, in syntax productions in comments.
Make more diagnostics in the front end generally take an explicit
location rather than implicitly using input_location. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "langhooks.h"
#include "input.h"
#include "cpplib.h"
#include "timevar.h"
#include "c-pragma.h"
#include "c-tree.h"
#include "flags.h"
#include "output.h"
#include "toplev.h"
#include "ggc.h"
#include "c-common.h"
#include "vec.h"
#include "target.h"
#include "cgraph.h"
/* Miscellaneous data and functions needed for the parser. */
int yydebug;
/* Objective-C specific parser/lexer information. */
static int objc_pq_context = 0;
/* The following flag is needed to contextualize Objective-C lexical
analysis. In some cases (e.g., 'int NSObject;'), it is undesirable
to bind an identifier to an Objective-C class, even if a class with
that name exists. */
static int objc_need_raw_identifier = 0;
#define OBJC_NEED_RAW_IDENTIFIER(VAL) \
do { \
if (c_dialect_objc ()) \
objc_need_raw_identifier = VAL; \
} while (0)
/* APPLE LOCAL begin C* property (Radar 4436866) (in 4.2 d) */
/* For checking property attribute keywords */
static int objc_property_attr_context;
/* APPLE LOCAL end C* property (Radar 4436866) (in 4.2 d) */
/* APPLE LOCAL radar 3803157 - objc attribute (in 4.2 e) */
static tree objc_method_attributes;
/* APPLE LOCAL begin C* language (in 4.2 f) */
/* For checking for 'foreach' context. */
static int objc_foreach_context;
/* APPLE LOCAL end C* language (in 4.2 f) */
/* APPLE LOCAL begin CW asm blocks (in 4.2 g) */
#ifndef IASM_SEE_OPCODE
#define IASM_SEE_OPCODE(YYCHAR, T) YYCHAR
#endif
#define TYPESPEC 1
#define IDENTIFIER 2
/* APPLE LOCAL end CW asm blocks (in 4.2 g) */
/* The reserved keyword table. */
/* One reserved-word entry: its spelling, its RID_* code, and a bitmask of
   the D_* dialect classes in which the keyword is disabled.  */
struct resword
{
const char *word; /* keyword spelling */
ENUM_BITFIELD(rid) rid : 16; /* token code the lexer hands out for it */
unsigned int disable : 16; /* D_* mask; see c_parse_init */
};
/* Disable mask. Keywords are disabled if (reswords[i].disable &
mask) is _true_. */
#define D_C89 0x01 /* not in C89 */
#define D_EXT 0x02 /* GCC extension */
#define D_EXT89 0x04 /* GCC extension incorporated in C99 */
#define D_OBJC 0x08 /* Objective C only */
/* The order of entries is not significant: c_parse_init walks the whole
   table and enters each enabled keyword into the identifier table.  */
static const struct resword reswords[] =
{
{ "_Bool",		RID_BOOL,	0 },
{ "_Complex",		RID_COMPLEX,	0 },
/* APPLE LOCAL CW asm blocks (in 4.2 h) */
{ "_asm",		RID_ASM,	0 },
{ "_Decimal32",       RID_DFLOAT32,  D_EXT },
{ "_Decimal64",       RID_DFLOAT64,  D_EXT },
{ "_Decimal128",      RID_DFLOAT128, D_EXT },
{ "__FUNCTION__",	RID_FUNCTION_NAME, 0 },
{ "__PRETTY_FUNCTION__", RID_PRETTY_FUNCTION_NAME, 0 },
{ "__alignof",		RID_ALIGNOF,	0 },
{ "__alignof__",	RID_ALIGNOF,	0 },
{ "__asm",		RID_ASM,	0 },
{ "__asm__",		RID_ASM,	0 },
{ "__attribute",	RID_ATTRIBUTE,	0 },
{ "__attribute__",	RID_ATTRIBUTE,	0 },
{ "__builtin_choose_expr", RID_CHOOSE_EXPR, 0 },
{ "__builtin_offsetof", RID_OFFSETOF, 0 },
{ "__builtin_types_compatible_p", RID_TYPES_COMPATIBLE_P, 0 },
{ "__builtin_va_arg",	RID_VA_ARG,	0 },
{ "__complex",		RID_COMPLEX,	0 },
{ "__complex__",	RID_COMPLEX,	0 },
{ "__const",		RID_CONST,	0 },
{ "__const__",		RID_CONST,	0 },
{ "__extension__",	RID_EXTENSION,	0 },
{ "__func__",		RID_C99_FUNCTION_NAME, 0 },
{ "__imag",		RID_IMAGPART,	0 },
{ "__imag__",		RID_IMAGPART,	0 },
{ "__inline",		RID_INLINE,	0 },
{ "__inline__",		RID_INLINE,	0 },
{ "__label__",		RID_LABEL,	0 },
/* APPLE LOCAL private extern (in 4.2 i) */
{ "__private_extern__",	RID_PRIVATE_EXTERN,	0 },
{ "__real",		RID_REALPART,	0 },
{ "__real__",		RID_REALPART,	0 },
{ "__restrict",		RID_RESTRICT,	0 },
{ "__restrict__",	RID_RESTRICT,	0 },
{ "__signed",		RID_SIGNED,	0 },
{ "__signed__",		RID_SIGNED,	0 },
{ "__thread",		RID_THREAD,	0 },
{ "__typeof",		RID_TYPEOF,	0 },
{ "__typeof__",		RID_TYPEOF,	0 },
{ "__volatile",		RID_VOLATILE,	0 },
{ "__volatile__",	RID_VOLATILE,	0 },
{ "asm",		RID_ASM,	D_EXT },
{ "auto",		RID_AUTO,	0 },
{ "break",		RID_BREAK,	0 },
{ "case",		RID_CASE,	0 },
{ "char",		RID_CHAR,	0 },
{ "const",		RID_CONST,	0 },
{ "continue",		RID_CONTINUE,	0 },
{ "default",		RID_DEFAULT,	0 },
{ "do",			RID_DO,		0 },
{ "double",		RID_DOUBLE,	0 },
{ "else",		RID_ELSE,	0 },
{ "enum",		RID_ENUM,	0 },
{ "extern",		RID_EXTERN,	0 },
{ "float",		RID_FLOAT,	0 },
{ "for",		RID_FOR,	0 },
{ "goto",		RID_GOTO,	0 },
{ "if",			RID_IF,		0 },
{ "inline",		RID_INLINE,	D_EXT89 },
{ "int",		RID_INT,	0 },
{ "long",		RID_LONG,	0 },
{ "register",		RID_REGISTER,	0 },
{ "restrict",		RID_RESTRICT,	D_C89 },
{ "return",		RID_RETURN,	0 },
{ "short",		RID_SHORT,	0 },
{ "signed",		RID_SIGNED,	0 },
{ "sizeof",		RID_SIZEOF,	0 },
{ "static",		RID_STATIC,	0 },
{ "struct",		RID_STRUCT,	0 },
{ "switch",		RID_SWITCH,	0 },
{ "typedef",		RID_TYPEDEF,	0 },
{ "typeof",		RID_TYPEOF,	D_EXT },
{ "union",		RID_UNION,	0 },
{ "unsigned",		RID_UNSIGNED,	0 },
{ "void",		RID_VOID,	0 },
{ "volatile",		RID_VOLATILE,	0 },
{ "while",		RID_WHILE,	0 },
/* These Objective-C keywords are recognized only immediately after
   an '@'.  */
{ "class",		RID_AT_CLASS,		D_OBJC },
{ "compatibility_alias", RID_AT_ALIAS,	D_OBJC },
{ "defs",		RID_AT_DEFS,		D_OBJC },
{ "encode",		RID_AT_ENCODE,		D_OBJC },
{ "end",		RID_AT_END,		D_OBJC },
{ "implementation",	RID_AT_IMPLEMENTATION,	D_OBJC },
{ "interface",		RID_AT_INTERFACE,	D_OBJC },
/* APPLE LOCAL begin C* language (in 4.2 j) */
{ "optional",		RID_AT_OPTIONAL,	D_OBJC },
{ "required",		RID_AT_REQUIRED,	D_OBJC },
/* APPLE LOCAL end C* language (in 4.2 j) */
/* APPLE LOCAL C* property (Radar 4436866) (in 4.2 k) */
{ "property",		RID_AT_PROPERTY,	D_OBJC },
/* APPLE LOCAL radar 4564694 */
{ "package",		RID_AT_PACKAGE,		D_OBJC },
{ "private",		RID_AT_PRIVATE,		D_OBJC },
{ "protected",		RID_AT_PROTECTED,	D_OBJC },
{ "protocol",		RID_AT_PROTOCOL,	D_OBJC },
{ "public",		RID_AT_PUBLIC,		D_OBJC },
{ "selector",		RID_AT_SELECTOR,	D_OBJC },
{ "throw",		RID_AT_THROW,		D_OBJC },
{ "try",		RID_AT_TRY,		D_OBJC },
{ "catch",		RID_AT_CATCH,		D_OBJC },
{ "finally",		RID_AT_FINALLY,		D_OBJC },
{ "synchronized",	RID_AT_SYNCHRONIZED,	D_OBJC },
/* These are recognized only in protocol-qualifier context
   (see above) */
{ "bycopy",		RID_BYCOPY,		D_OBJC },
{ "byref",		RID_BYREF,		D_OBJC },
{ "in",			RID_IN,			D_OBJC },
{ "inout",		RID_INOUT,		D_OBJC },
{ "oneway",		RID_ONEWAY,		D_OBJC },
{ "out",		RID_OUT,		D_OBJC },
/* APPLE LOCAL begin C* property (Radar 4436866) (in 4.2 l) */
/* These are recognized inside a property attribute list */
{ "readonly",		RID_READONLY,		D_OBJC },
{ "getter",		RID_GETTER,		D_OBJC },
{ "setter",		RID_SETTER,		D_OBJC },
/* APPLE LOCAL end C* property (Radar 4436866) (in 4.2 l) */
/* APPLE LOCAL begin objc new property */
{ "synthesize",		RID_AT_SYNTHESIZE,	D_OBJC },
{ "dynamic",		RID_AT_DYNAMIC,		D_OBJC },
{ "readwrite",		RID_READWRITE,		D_OBJC },
{ "assign",	        RID_ASSIGN,		D_OBJC },
{ "retain",	        RID_RETAIN,		D_OBJC },
{ "copy",	        RID_COPY,		D_OBJC },
/* APPLE LOCAL end objc new property */
/* APPLE LOCAL radar 4947014 - objc atomic property */
{ "nonatomic",		RID_NONATOMIC,		D_OBJC },
};
#define N_reswords (sizeof reswords / sizeof (struct resword))
/* All OpenMP clauses. OpenMP 2.5. */
/* Clause codes used while parsing an OpenMP pragma;
   PRAGMA_OMP_CLAUSE_NONE means "no clause recognized".  */
typedef enum pragma_omp_clause {
PRAGMA_OMP_CLAUSE_NONE = 0,
PRAGMA_OMP_CLAUSE_COPYIN,
PRAGMA_OMP_CLAUSE_COPYPRIVATE,
PRAGMA_OMP_CLAUSE_DEFAULT,
PRAGMA_OMP_CLAUSE_FIRSTPRIVATE,
PRAGMA_OMP_CLAUSE_IF,
PRAGMA_OMP_CLAUSE_LASTPRIVATE,
PRAGMA_OMP_CLAUSE_NOWAIT,
PRAGMA_OMP_CLAUSE_NUM_THREADS,
PRAGMA_OMP_CLAUSE_ORDERED,
PRAGMA_OMP_CLAUSE_PRIVATE,
PRAGMA_OMP_CLAUSE_REDUCTION,
PRAGMA_OMP_CLAUSE_SCHEDULE,
PRAGMA_OMP_CLAUSE_SHARED
} pragma_omp_clause;
/* Initialization routine for this file. */
void
c_parse_init (void)
{
/* The only initialization required is of the reserved word
identifiers. */
unsigned int i;
tree id;
int mask = (flag_isoc99 ? 0 : D_C89)
| (flag_no_asm ? (flag_isoc99 ? D_EXT : D_EXT|D_EXT89) : 0);
if (!c_dialect_objc ())
mask |= D_OBJC;
ridpointers = GGC_CNEWVEC (tree, (int) RID_MAX);
for (i = 0; i < N_reswords; i++)
{
/* If a keyword is disabled, do not enter it into the table
and so create a canonical spelling that isn't a keyword. */
if (reswords[i].disable & mask)
continue;
id = get_identifier (reswords[i].word);
C_RID_CODE (id) = reswords[i].rid;
C_IS_RESERVED_WORD (id) = 1;
ridpointers [(int) reswords[i].rid] = id;
}
}
/* The C lexer intermediates between the lexer in cpplib and c-lex.c
and the C parser. Unlike the C++ lexer, the parser structure
stores the lexer information instead of using a separate structure.
Identifiers are separated into ordinary identifiers, type names,
keywords and some other Objective-C types of identifiers, and some
look-ahead is maintained.
??? It might be a good idea to lex the whole file up front (as for
C++). It would then be possible to share more of the C and C++
lexer code, if desired. */
/* The following local token type is used. */
/* A keyword. */
#define CPP_KEYWORD ((enum cpp_ttype) (N_TTYPES + 1))
/* More information about the type of a CPP_NAME token. */
/* Classifies what a CPP_NAME token currently denotes in this scope.  */
typedef enum c_id_kind {
/* An ordinary identifier.  */
C_ID_ID,
/* An identifier declared as a typedef name.  */
C_ID_TYPENAME,
/* An identifier declared as an Objective-C class name.  */
C_ID_CLASSNAME,
/* Not an identifier.  */
C_ID_NONE
} c_id_kind;
/* A single C token after string literal concatenation and conversion
of preprocessing tokens to tokens. */
/* A single C token after string literal concatenation and conversion of
   preprocessing tokens to tokens.  (GTY marks it for garbage collection.)  */
typedef struct c_token GTY (())
{
/* The kind of token.  */
ENUM_BITFIELD (cpp_ttype) type : 8;
/* If this token is a CPP_NAME, this value indicates whether also
   declared as some kind of type.  Otherwise, it is C_ID_NONE.  */
ENUM_BITFIELD (c_id_kind) id_kind : 8;
/* If this token is a keyword, this value indicates which keyword.
   Otherwise, this value is RID_MAX.  */
ENUM_BITFIELD (rid) keyword : 8;
/* APPLE LOCAL begin CW asm blocks */
/* Token flags, as filled in by c_lex_with_flags.  */
unsigned char flags;
/* APPLE LOCAL end CW asm blocks */
/* If this token is a CPP_PRAGMA, this indicates the pragma that
   was seen.  Otherwise it is PRAGMA_NONE.  */
ENUM_BITFIELD (pragma_kind) pragma_kind : 7;
/* True if this token is from a system header.  */
BOOL_BITFIELD in_system_header : 1;
/* The value associated with this token, if any.  */
tree value;
/* The location at which this token was found.  */
location_t location;
} c_token;
/* A parser structure recording information about the state and
context of parsing. Includes lexer information with up to two
tokens of look-ahead; more are not needed for C. */
typedef struct c_parser GTY(())
{
  /* The look-ahead tokens.  tokens[0] is the next token to be
     consumed; tokens[1], when valid, is the one after that.  */
  c_token tokens[2];
  /* How many look-ahead tokens are available (0, 1 or 2).  */
  short tokens_avail;
  /* True if a syntax error is being recovered from; false otherwise.
     c_parser_error sets this flag.  It should clear this flag when
     enough tokens have been consumed to recover from the error.  */
  BOOL_BITFIELD error : 1;
  /* True if we're processing a pragma, and shouldn't automatically
     consume CPP_PRAGMA_EOL.  */
  BOOL_BITFIELD in_pragma : 1;
} c_parser;
/* The actual parser and external interface. ??? Does this need to be
garbage-collected? */
static GTY (()) c_parser *the_parser;
/* APPLE LOCAL C* language (in 4.2 ae) */
static c_token * c_parser_peek_2nd_token (c_parser *);
/* Read in and lex a single token, storing it in *TOKEN. */
static void
/* APPLE LOCAL C* language (in 4.2 ae) */
c_lex_one_token (c_token *token, c_parser *parser)
{
  timevar_push (TV_LEX);
  /* APPLE LOCAL CW asm blocks */
  token->type = c_lex_with_flags (&token->value, &token->location, &token->flags);
  /* Start from a clean classification; the switch below refines it.  */
  token->id_kind = C_ID_NONE;
  token->keyword = RID_MAX;
  token->pragma_kind = PRAGMA_NONE;
  token->in_system_header = in_system_header;
  switch (token->type)
    {
    case CPP_NAME:
      /* Classify the identifier: keyword, typedef name, Objective-C
	 class name, or ordinary identifier.  */
      {
	tree decl;
	int objc_force_identifier = objc_need_raw_identifier;
	OBJC_NEED_RAW_IDENTIFIER (0);
	if (C_IS_RESERVED_WORD (token->value))
	  {
	    enum rid rid_code = C_RID_CODE (token->value);
	    /* APPLE LOCAL begin CW asm blocks (in 4.2 ad) */
	    if (IASM_SEE_OPCODE (TYPESPEC, token->value) == IDENTIFIER
		&& iasm_state >= iasm_decls
		&& iasm_in_operands == false)
	      {
		/* If this was an opcode, prefer it.  */
		token->id_kind = C_ID_ID;
		break;
	      }
	    /* APPLE LOCAL end CW asm blocks (in 4.2 ad) */
	    if (c_dialect_objc ())
	      {
		/* In Objective-C, @-keywords and context-sensitive
		   keywords only count as keywords in the right
		   context; otherwise fall through to the plain
		   identifier handling below.  */
		if (!OBJC_IS_AT_KEYWORD (rid_code)
		    /* APPLE LOCAL objc new property */
		    && !OBJC_IS_NEW_PATTR_KEYWORD (rid_code)
		    && (!OBJC_IS_PQ_KEYWORD (rid_code) || objc_pq_context))
		  {
		    /* Return the canonical spelling for this keyword.  */
		    token->value = ridpointers[(int) rid_code];
		    token->type = CPP_KEYWORD;
		    token->keyword = rid_code;
		    break;
		  }
		/* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
		else if (objc_foreach_context && rid_code == RID_IN)
		  {
		    /* This is dangerous: we assume we do not need three
		       tokens of look-ahead.  */
		    c_token *tk = c_parser_peek_2nd_token (parser);
		    if (tk->type == CPP_NAME
			|| tk->type == CPP_OPEN_PAREN
			|| tk->type == CPP_MULT
			|| tk->type == CPP_PLUS
			|| tk->type == CPP_PLUS_PLUS
			|| tk->type == CPP_MINUS
			|| tk->type == CPP_MINUS_MINUS
			/* APPLE LOCAL radar 4529200 (in 4.2 af) */
			|| tk->type == CPP_OPEN_SQUARE)
		      {
			token->type = CPP_KEYWORD;
			token->keyword = rid_code;
			break;
		      }
		  }
		/* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
		/* APPLE LOCAL begin objc new property */
		else if (objc_property_attr_context && OBJC_IS_NEW_PATTR_KEYWORD (rid_code))
		  {
		    token->type = CPP_KEYWORD;
		    token->keyword = rid_code;
		    break;
		  }
		/* APPLE LOCAL end objc new property */
	      }
	    else
	      {
		/* Return the canonical spelling for this keyword.  */
		token->value = ridpointers[(int) rid_code];
		token->type = CPP_KEYWORD;
		token->keyword = rid_code;
		break;
	      }
	  }
	/* Not (treated as) a keyword: check whether it names a type.  */
	decl = lookup_name (token->value);
	if (decl)
	  {
	    if (TREE_CODE (decl) == TYPE_DECL)
	      {
		token->id_kind = C_ID_TYPENAME;
		break;
	      }
	  }
	else if (c_dialect_objc ())
	  {
	    tree objc_interface_decl = objc_is_class_name (token->value);
	    /* Objective-C class names are in the same namespace as
	       variables and typedefs, and hence are shadowed by local
	       declarations.  */
	    if (objc_interface_decl
		&& (global_bindings_p ()
		    || (!objc_force_identifier && !decl)))
	      {
		token->value = objc_interface_decl;
		token->id_kind = C_ID_CLASSNAME;
		break;
	      }
	  }
	token->id_kind = C_ID_ID;
      }
      break;
    case CPP_AT_NAME:
      /* This only happens in Objective-C; it must be a keyword.  */
      token->type = CPP_KEYWORD;
      token->keyword = C_RID_CODE (token->value);
      break;
    case CPP_COLON:
    case CPP_COMMA:
    case CPP_CLOSE_PAREN:
    case CPP_SEMICOLON:
      /* These tokens may affect the interpretation of any identifiers
	 following, if doing Objective-C.  */
      OBJC_NEED_RAW_IDENTIFIER (0);
      break;
    case CPP_PRAGMA:
      /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST.  */
      token->pragma_kind = TREE_INT_CST_LOW (token->value);
      token->value = NULL;
      break;
    default:
      break;
    }
  timevar_pop (TV_LEX);
}
/* Return a pointer to the next token from PARSER, reading it in if
necessary. */
static inline c_token *
c_parser_peek_token (c_parser *parser)
{
  /* Fast path: a token is already buffered.  */
  if (parser->tokens_avail != 0)
    return &parser->tokens[0];
  /* APPLE LOCAL begin switch these two */
  parser->tokens_avail = 1;
  /* APPLE LOCAL C* language (in 4.2 ae) */
  c_lex_one_token (&parser->tokens[0], parser);
  /* APPLE LOCAL end switch these two */
  return &parser->tokens[0];
}
/* Return true if the next token from PARSER has the indicated
TYPE. */
static inline bool
c_parser_next_token_is (c_parser *parser, enum cpp_ttype type)
{
  c_token *tok = c_parser_peek_token (parser);
  return tok->type == type;
}
/* Return true if the next token from PARSER does not have the
indicated TYPE. */
static inline bool
c_parser_next_token_is_not (c_parser *parser, enum cpp_ttype type)
{
  return c_parser_peek_token (parser)->type != type;
}
/* Return true if the next token from PARSER is the indicated
KEYWORD. */
static inline bool
c_parser_next_token_is_keyword (c_parser *parser, enum rid keyword)
{
  /* Peek at the next token and compare its keyword classification
     (RID_MAX for non-keywords, so this is false for them).  */
  return c_parser_peek_token (parser)->keyword == keyword;
}
/* Return true if TOKEN can start a type name,
false otherwise. */
static bool
c_token_starts_typename (c_token *token)
{
  if (token->type == CPP_NAME)
    {
      /* An identifier starts a type name only if it was classified as
	 a typedef name or an Objective-C class name.  */
      switch (token->id_kind)
	{
	case C_ID_TYPENAME:
	  return true;
	case C_ID_CLASSNAME:
	  gcc_assert (c_dialect_objc ());
	  return true;
	case C_ID_ID:
	  return false;
	default:
	  gcc_unreachable ();
	}
    }
  if (token->type == CPP_KEYWORD)
    {
      /* Type specifiers, tag keywords, typeof, qualifiers and
	 __attribute__ may all begin a type name.  */
      switch (token->keyword)
	{
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_SHORT:
	case RID_SIGNED:
	case RID_COMPLEX:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	case RID_BOOL:
	case RID_ENUM:
	case RID_STRUCT:
	case RID_UNION:
	case RID_TYPEOF:
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	case RID_ATTRIBUTE:
	  return true;
	default:
	  return false;
	}
    }
  /* In Objective-C, '<' can open a protocol qualifier list.  */
  if (token->type == CPP_LESS)
    return c_dialect_objc () != 0;
  return false;
}
/* Return true if the next token from PARSER can start a type name,
false otherwise. */
static inline bool
c_parser_next_token_starts_typename (c_parser *parser)
{
  return c_token_starts_typename (c_parser_peek_token (parser));
}
/* Return true if TOKEN can start declaration specifiers, false
otherwise. */
static bool
c_token_starts_declspecs (c_token *token)
{
  if (token->type == CPP_NAME)
    {
      /* Only type-like identifiers start declaration specifiers.  */
      switch (token->id_kind)
	{
	case C_ID_TYPENAME:
	  return true;
	case C_ID_CLASSNAME:
	  gcc_assert (c_dialect_objc ());
	  return true;
	case C_ID_ID:
	  return false;
	default:
	  gcc_unreachable ();
	}
    }
  if (token->type == CPP_KEYWORD)
    {
      /* Everything that can start a type name, plus storage-class
	 specifiers and function specifiers.  */
      switch (token->keyword)
	{
	case RID_STATIC:
	case RID_EXTERN:
	/* APPLE LOCAL private extern 5487726 */
	case RID_PRIVATE_EXTERN:
	case RID_REGISTER:
	case RID_TYPEDEF:
	case RID_INLINE:
	case RID_AUTO:
	case RID_THREAD:
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_SHORT:
	case RID_SIGNED:
	case RID_COMPLEX:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	case RID_BOOL:
	case RID_ENUM:
	case RID_STRUCT:
	case RID_UNION:
	case RID_TYPEOF:
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	case RID_ATTRIBUTE:
	  return true;
	default:
	  return false;
	}
    }
  /* In Objective-C, '<' can open a protocol qualifier list.  */
  if (token->type == CPP_LESS)
    return c_dialect_objc () != 0;
  return false;
}
/* Return true if the next token from PARSER can start declaration
specifiers, false otherwise. */
static inline bool
c_parser_next_token_starts_declspecs (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  /* APPLE LOCAL begin radar 5277239 */
  if (!c_token_starts_declspecs (token))
    return false;
  /* CLASS.method is property-style dot-syntax notation calling a
     class method (equivalent to [CLASS meth]), so a class name
     followed by '.' does not start declaration specifiers.  */
  if (token->id_kind == C_ID_CLASSNAME
      && c_parser_peek_2nd_token (parser)->type == CPP_DOT)
    return false;
  return true;
  /* APPLE LOCAL end radar 5277239 */
}
/* Return a pointer to the next-but-one token from PARSER, reading it
in if necessary. The next token is already read in. */
static c_token *
c_parser_peek_2nd_token (c_parser *parser)
{
  if (parser->tokens_avail < 2)
    {
      /* The first look-ahead token must already be present, and it
	 must not end the token stream.  */
      gcc_assert (parser->tokens_avail == 1);
      gcc_assert (parser->tokens[0].type != CPP_EOF);
      gcc_assert (parser->tokens[0].type != CPP_PRAGMA_EOL);
      /* APPLE LOCAL begin switch these two */
      parser->tokens_avail = 2;
      /* APPLE LOCAL C* language (in 4.2 ae) */
      c_lex_one_token (&parser->tokens[1], parser);
      /* APPLE LOCAL end switch these two */
    }
  return &parser->tokens[1];
}
/* Consume the next token from PARSER. */
static void
c_parser_consume_token (c_parser *parser)
{
  c_token *toks = parser->tokens;
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (toks[0].type != CPP_EOF);
  gcc_assert (!parser->in_pragma || toks[0].type != CPP_PRAGMA_EOL);
  gcc_assert (parser->error || toks[0].type != CPP_PRAGMA);
  /* Shift the second look-ahead token, if any, into the first slot.  */
  if (parser->tokens_avail == 2)
    toks[0] = toks[1];
  parser->tokens_avail--;
}
/* Expect the current token to be a #pragma. Consume it and remember
that we've begun parsing a pragma. */
static void
c_parser_consume_pragma (c_parser *parser)
{
  gcc_assert (!parser->in_pragma);
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (parser->tokens[0].type == CPP_PRAGMA);
  /* Record that we are inside a pragma before consuming its token.  */
  parser->in_pragma = true;
  if (parser->tokens_avail == 2)
    parser->tokens[0] = parser->tokens[1];
  parser->tokens_avail--;
}
/* Update the globals input_location and in_system_header from
TOKEN. */
static inline void
c_parser_set_source_position_from_token (c_token *token)
{
  /* EOF carries no useful position; leave the globals alone.  */
  if (token->type == CPP_EOF)
    return;
  input_location = token->location;
  in_system_header = token->in_system_header;
}
/* Issue a diagnostic of the form
FILE:LINE: MESSAGE before TOKEN
where TOKEN is the next token in the input stream of PARSER.
MESSAGE (specified by the caller) is usually of the form "expected
OTHER-TOKEN".
Do not issue a diagnostic if still recovering from an error.
??? This is taken from the C++ parser, but building up messages in
this way is not i18n-friendly and some other approach should be
used. */
static void
c_parser_error (c_parser *parser, const char *gmsgid)
{
  c_token *token = c_parser_peek_token (parser);
  enum cpp_ttype report_type;
  if (parser->error)
    return;
  parser->error = true;
  if (!gmsgid)
    return;
  /* Tag the diagnostic to the line of the token we just peeked at.  */
  c_parser_set_source_position_from_token (token);
  /* c_parse_error does not understand CPP_KEYWORD, so keywords are
     reported as if they were plain identifiers.  */
  report_type = (token->type == CPP_KEYWORD) ? CPP_NAME : token->type;
  c_parse_error (gmsgid, report_type, token->value);
}
/* If the next token is of the indicated TYPE, consume it. Otherwise,
issue the error MSGID. If MSGID is NULL then a message has already
been produced and no message will be produced this time. Returns
true if found, false otherwise. */
static bool
c_parser_require (c_parser *parser,
		  enum cpp_ttype type,
		  const char *msgid)
{
  /* On mismatch report (unless MSGID is NULL) and leave the token.  */
  if (!c_parser_next_token_is (parser, type))
    {
      c_parser_error (parser, msgid);
      return false;
    }
  c_parser_consume_token (parser);
  return true;
}
/* If the next token is the indicated keyword, consume it. Otherwise,
issue the error MSGID. Returns true if found, false otherwise. */
static bool
c_parser_require_keyword (c_parser *parser,
			  enum rid keyword,
			  const char *msgid)
{
  /* On mismatch report (unless MSGID is NULL) and leave the token.  */
  if (!c_parser_next_token_is_keyword (parser, keyword))
    {
      c_parser_error (parser, msgid);
      return false;
    }
  c_parser_consume_token (parser);
  return true;
}
/* Like c_parser_require, except that tokens will be skipped until the
desired token is found. An error message is still produced if the
next token is not as expected. If MSGID is NULL then a message has
already been produced and no message will be produced this
time. */
static void
c_parser_skip_until_found (c_parser *parser,
			   enum cpp_ttype type,
			   const char *msgid)
{
  unsigned nesting_depth = 0;
  /* If the desired token is already next, this consumes it and we are
     done; otherwise it emits the diagnostic (when MSGID is non-NULL).  */
  if (c_parser_require (parser, type, msgid))
    return;
  /* Skip tokens until the desired token is found.  */
  while (true)
    {
      /* Peek at the next token.  */
      c_token *token = c_parser_peek_token (parser);
      /* If we've reached the token we want, consume it and stop.  */
      if (token->type == type && !nesting_depth)
	{
	  c_parser_consume_token (parser);
	  break;
	}
      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
	return;
      if (token->type == CPP_PRAGMA_EOL && parser->in_pragma)
	return;
      if (token->type == CPP_OPEN_BRACE
	  || token->type == CPP_OPEN_PAREN
	  || token->type == CPP_OPEN_SQUARE)
	++nesting_depth;
      else if (token->type == CPP_CLOSE_BRACE
	       || token->type == CPP_CLOSE_PAREN
	       || token->type == CPP_CLOSE_SQUARE)
	{
	  /* A closing delimiter at depth 0 ends the skip without
	     being consumed (it likely belongs to an enclosing
	     construct).  Note the post-decrement: depth 0 breaks
	     before the wrapped value is ever used.  */
	  if (nesting_depth-- == 0)
	    break;
	}
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
  parser->error = false;
}
/* Skip tokens until the end of a parameter is found, but do not
consume the comma, semicolon or closing delimiter. */
static void
c_parser_skip_to_end_of_parameter (c_parser *parser)
{
  unsigned nesting_depth = 0;
  while (true)
    {
      c_token *token = c_parser_peek_token (parser);
      /* A non-nested ',' or ';' ends the parameter; leave it for the
	 caller to inspect.  */
      if ((token->type == CPP_COMMA || token->type == CPP_SEMICOLON)
	  && !nesting_depth)
	break;
      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
	return;
      if (token->type == CPP_PRAGMA_EOL && parser->in_pragma)
	return;
      if (token->type == CPP_OPEN_BRACE
	  || token->type == CPP_OPEN_PAREN
	  || token->type == CPP_OPEN_SQUARE)
	++nesting_depth;
      else if (token->type == CPP_CLOSE_BRACE
	       || token->type == CPP_CLOSE_PAREN
	       || token->type == CPP_CLOSE_SQUARE)
	{
	  /* A closing delimiter at depth 0 also ends the parameter
	     and is likewise left unconsumed.  */
	  if (nesting_depth-- == 0)
	    break;
	}
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
  parser->error = false;
}
/* Expect to be at the end of the pragma directive and consume an
end of line marker. */
static void
c_parser_skip_to_pragma_eol (c_parser *parser)
{
  gcc_assert (parser->in_pragma);
  parser->in_pragma = false;
  /* Usually the end-of-line marker is next; otherwise discard tokens
     until we consume one (or hit end of file).  */
  if (!c_parser_require (parser, CPP_PRAGMA_EOL, "expected end of line"))
    {
      bool found_eol = false;
      while (!found_eol)
	{
	  c_token *token = c_parser_peek_token (parser);
	  if (token->type == CPP_EOF)
	    break;
	  found_eol = (token->type == CPP_PRAGMA_EOL);
	  c_parser_consume_token (parser);
	}
    }
  parser->error = false;
}
/* Skip tokens until we have consumed an entire block, or until we
have consumed a non-nested ';'. */
static void
c_parser_skip_to_end_of_block_or_statement (c_parser *parser)
{
  unsigned nesting_depth = 0;
  /* Preserved across embedded pragmas, which clear parser->error via
     c_parser_skip_to_pragma_eol.  */
  bool save_error = parser->error;
  while (true)
    {
      c_token *token;
      /* Peek at the next token.  */
      token = c_parser_peek_token (parser);
      switch (token->type)
	{
	case CPP_EOF:
	  return;
	case CPP_PRAGMA_EOL:
	  if (parser->in_pragma)
	    return;
	  break;
	case CPP_SEMICOLON:
	  /* If the next token is a ';', we have reached the
	     end of the statement.  */
	  if (!nesting_depth)
	    {
	      /* Consume the ';'.  */
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;
	case CPP_CLOSE_BRACE:
	  /* If the next token is a non-nested '}', then we have
	     reached the end of the current block.  */
	  if (nesting_depth == 0 || --nesting_depth == 0)
	    {
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;
	case CPP_OPEN_BRACE:
	  /* If the next token is a '{', then we are entering a new
	     block.  Consume the entire block.  */
	  ++nesting_depth;
	  break;
	case CPP_PRAGMA:
	  /* If we see a pragma, consume the whole thing at once.  We
	     have some safeguards against consuming pragmas willy-nilly.
	     Normally, we'd expect to be here with parser->error set,
	     which disables these safeguards.  But it's possible to get
	     here for secondary error recovery, after parser->error has
	     been cleared.  */
	  c_parser_consume_pragma (parser);
	  c_parser_skip_to_pragma_eol (parser);
	  parser->error = save_error;
	  continue;
	default:
	  break;
	}
      c_parser_consume_token (parser);
    }
 finished:
  parser->error = false;
}
/* Save the warning flags which are controlled by __extension__. */
static inline int
disable_extension_diagnostics (void)
{
  /* Pack each flag into its own bit of the return value, then clear
     them all; restore_extension_diagnostics unpacks the same layout.  */
  int saved = pedantic;
  saved |= warn_pointer_arith << 1;
  saved |= warn_traditional << 2;
  saved |= flag_iso << 3;
  pedantic = 0;
  warn_pointer_arith = 0;
  warn_traditional = 0;
  flag_iso = 0;
  return saved;
}
/* Restore the warning flags which are controlled by __extension__.
FLAGS is the return value from disable_extension_diagnostics. */
static inline void
restore_extension_diagnostics (int flags)
{
  /* Unpack the bit layout produced by disable_extension_diagnostics.  */
  pedantic = (flags >> 0) & 1;
  warn_pointer_arith = (flags >> 1) & 1;
  warn_traditional = (flags >> 2) & 1;
  flag_iso = (flags >> 3) & 1;
}
/* Possible kinds of declarator to parse.  */
typedef enum c_dtr_syn {
  /* A normal declarator with an identifier.  */
  C_DTR_NORMAL,
  /* An abstract declarator (maybe empty).  */
  C_DTR_ABSTRACT,
  /* A parameter declarator: may be either, but after a type name does
     not redeclare a typedef name as an identifier if it can
     alternatively be interpreted as a typedef name; see DR#009,
     applied in C90 TC1, omitted from C99 and reapplied in C99 TC2
     following DR#249.  For example, given a typedef T, "int T" and
     "int *T" are valid parameter declarations redeclaring T, while
     "int (T)" and "int * (T)" and "int (T[])" and "int (T (int))" are
     abstract declarators rather than involving redundant parentheses;
     the same applies with attributes inside the parentheses before
     "T".  */
  C_DTR_PARM
} c_dtr_syn;
/* APPLE LOCAL begin CW asm blocks */
static void c_parser_iasm_top_statement (c_parser *);
static void c_parser_iasm_compound_statement (c_parser *);
static bool c_parser_iasm_bol (c_parser *);
static void c_parser_iasm_line_seq_opt (c_parser*);
/* APPLE LOCAL end CW asm blocks */
static void c_parser_external_declaration (c_parser *);
static void c_parser_asm_definition (c_parser *);
/* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
static void c_parser_declaration_or_fndef (c_parser *, bool, bool, bool, bool, tree*);
static void c_parser_declspecs (c_parser *, struct c_declspecs *, bool, bool,
bool);
static struct c_typespec c_parser_enum_specifier (c_parser *);
static struct c_typespec c_parser_struct_or_union_specifier (c_parser *);
static tree c_parser_struct_declaration (c_parser *);
static struct c_typespec c_parser_typeof_specifier (c_parser *);
static struct c_declarator *c_parser_declarator (c_parser *, bool, c_dtr_syn,
bool *);
static struct c_declarator *c_parser_direct_declarator (c_parser *, bool,
c_dtr_syn, bool *);
static struct c_declarator *c_parser_direct_declarator_inner (c_parser *,
bool,
struct c_declarator *);
static struct c_arg_info *c_parser_parms_declarator (c_parser *, bool, tree);
static struct c_arg_info *c_parser_parms_list_declarator (c_parser *, tree);
static struct c_parm *c_parser_parameter_declaration (c_parser *, tree);
static tree c_parser_simple_asm_expr (c_parser *);
static tree c_parser_attributes (c_parser *);
static struct c_type_name *c_parser_type_name (c_parser *);
static struct c_expr c_parser_initializer (c_parser *);
static struct c_expr c_parser_braced_init (c_parser *, tree, bool);
static void c_parser_initelt (c_parser *);
static void c_parser_initval (c_parser *, struct c_expr *);
static tree c_parser_compound_statement (c_parser *);
static void c_parser_compound_statement_nostart (c_parser *);
static void c_parser_label (c_parser *);
static void c_parser_statement (c_parser *);
static void c_parser_statement_after_labels (c_parser *);
static void c_parser_if_statement (c_parser *);
static void c_parser_switch_statement (c_parser *);
static void c_parser_while_statement (c_parser *);
static void c_parser_do_statement (c_parser *);
static void c_parser_for_statement (c_parser *);
static tree c_parser_asm_statement (c_parser *);
static tree c_parser_asm_operands (c_parser *, bool);
static tree c_parser_asm_clobbers (c_parser *);
static struct c_expr c_parser_expr_no_commas (c_parser *, struct c_expr *);
static struct c_expr c_parser_conditional_expression (c_parser *,
struct c_expr *);
static struct c_expr c_parser_binary_expression (c_parser *, struct c_expr *);
static struct c_expr c_parser_cast_expression (c_parser *, struct c_expr *);
static struct c_expr c_parser_unary_expression (c_parser *);
static struct c_expr c_parser_sizeof_expression (c_parser *);
static struct c_expr c_parser_alignof_expression (c_parser *);
static struct c_expr c_parser_postfix_expression (c_parser *);
static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *,
struct c_type_name *);
static struct c_expr c_parser_postfix_expression_after_primary (c_parser *,
struct c_expr);
static struct c_expr c_parser_expression (c_parser *);
static struct c_expr c_parser_expression_conv (c_parser *);
static tree c_parser_expr_list (c_parser *, bool);
static void c_parser_omp_construct (c_parser *);
static void c_parser_omp_threadprivate (c_parser *);
static void c_parser_omp_barrier (c_parser *);
static void c_parser_omp_flush (c_parser *);
enum pragma_context { pragma_external, pragma_stmt, pragma_compound };
static bool c_parser_pragma (c_parser *, enum pragma_context);
/* These Objective-C parser functions are only ever called when
compiling Objective-C. */
/* APPLE LOCAL radar 4548636 - class attributes. */
static void c_parser_objc_class_definition (c_parser *, tree);
static void c_parser_objc_class_instance_variables (c_parser *);
static void c_parser_objc_class_declaration (c_parser *);
static void c_parser_objc_alias_declaration (c_parser *);
/* APPLE LOCAL radar 4947311 - protocol attributes */
static void c_parser_objc_protocol_definition (c_parser *, tree);
static enum tree_code c_parser_objc_method_type (c_parser *);
static void c_parser_objc_method_definition (c_parser *);
/* APPLE LOCAL C* property (Radar 4436866) (in 4.2 b) */
static void c_parser_objc_interfacedecllist (c_parser *);
/* APPLE LOCAL C* property (Radar 4436866) (in 4.2 x) */
static void c_parser_objc_property_declaration (c_parser *);
/* APPLE LOCAL begin objc new property */
static void c_parser_objc_atsynthesize_declaration (c_parser *);
static void c_parser_objc_atdynamic_declaration (c_parser *);
/* APPLE LOCAL end objc new property */
static void c_parser_objc_methodproto (c_parser *);
static tree c_parser_objc_method_decl (c_parser *);
static tree c_parser_objc_type_name (c_parser *);
static tree c_parser_objc_protocol_refs (c_parser *);
static void c_parser_objc_try_catch_statement (c_parser *);
static void c_parser_objc_synchronized_statement (c_parser *);
static tree c_parser_objc_selector (c_parser *);
static tree c_parser_objc_selector_arg (c_parser *);
static tree c_parser_objc_receiver (c_parser *);
static tree c_parser_objc_message_args (c_parser *);
static tree c_parser_objc_keywordexpr (c_parser *);
/* Parse a translation unit (C90 6.7, C99 6.9).
translation-unit:
external-declarations
external-declarations:
external-declaration
external-declarations external-declaration
GNU extensions:
translation-unit:
empty
*/
static void
c_parser_translation_unit (c_parser *parser)
{
  void *obstack_position;
  /* GNU extension: an empty translation unit is accepted, with a
     pedwarn under -pedantic.  */
  if (c_parser_next_token_is (parser, CPP_EOF))
    {
      if (pedantic)
	pedwarn ("ISO C forbids an empty source file");
      return;
    }
  obstack_position = obstack_alloc (&parser_obstack, 0);
  /* The first token is known not to be EOF here, so this loop always
     parses at least one external declaration.  */
  while (c_parser_next_token_is_not (parser, CPP_EOF))
    {
      ggc_collect ();
      c_parser_external_declaration (parser);
      obstack_free (&parser_obstack, obstack_position);
    }
}
/* Parse an external declaration (C90 6.7, C99 6.9).
external-declaration:
function-definition
declaration
GNU extensions:
external-declaration:
asm-definition
;
__extension__ external-declaration
Objective-C:
external-declaration:
objc-class-definition
objc-class-declaration
objc-alias-declaration
objc-protocol-definition
objc-method-definition
@end
*/
static void
c_parser_external_declaration (c_parser *parser)
{
  int ext;
  /* Dispatch on the first token; anything not handled specially falls
     through to the declaration-or-function-definition case.  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_EXTENSION:
	  /* __extension__ suppresses pedantic diagnostics for the
	     declaration it prefixes.  */
	  ext = disable_extension_diagnostics ();
	  c_parser_consume_token (parser);
	  c_parser_external_declaration (parser);
	  restore_extension_diagnostics (ext);
	  break;
	case RID_ASM:
	  c_parser_asm_definition (parser);
	  break;
	case RID_AT_INTERFACE:
	case RID_AT_IMPLEMENTATION:
	  gcc_assert (c_dialect_objc ());
	  /* APPLE LOCAL radar 4548636 - class attributes. */
	  c_parser_objc_class_definition (parser, NULL_TREE);
	  break;
	case RID_AT_CLASS:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_class_declaration (parser);
	  break;
	case RID_AT_ALIAS:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_alias_declaration (parser);
	  break;
	case RID_AT_PROTOCOL:
	  gcc_assert (c_dialect_objc ());
	  /* APPLE LOCAL begin radar 4947311 - protocol attributes */
	  c_parser_objc_protocol_definition (parser, NULL_TREE);
	  break;
	  /* APPLE LOCAL end radar 4947311 - protocol attributes */
	  /* APPLE LOCAL begin C* property (Radar 4436866) (in 4.2 x) */
	case RID_AT_PROPERTY:
	  c_parser_objc_property_declaration (parser);
	  break;
	  /* APPLE LOCAL end C* property (Radar 4436866) (in 4.2 x) */
	  /* APPLE LOCAL begin objc new property */
	case RID_AT_SYNTHESIZE:
	  c_parser_objc_atsynthesize_declaration (parser);
	  break;
	case RID_AT_DYNAMIC:
	  c_parser_objc_atdynamic_declaration (parser);
	  break;
	  /* APPLE LOCAL end objc new property */
	case RID_AT_END:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  objc_finish_implementation ();
	  break;
	default:
	  /* Any other keyword (e.g. a type specifier) starts a
	     declaration or function definition.  */
	  goto decl_or_fndef;
	}
      break;
    case CPP_SEMICOLON:
      /* GNU extension: a stray ';' at file scope.  */
      if (pedantic)
	pedwarn ("ISO C does not allow extra %<;%> outside of a function");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      c_parser_pragma (parser, pragma_external);
      break;
    case CPP_PLUS:
    case CPP_MINUS:
      /* '+' or '-' at file scope introduces an Objective-C method
	 definition.  */
      if (c_dialect_objc ())
	{
	  c_parser_objc_method_definition (parser);
	  break;
	}
      /* Else fall through, and yield a syntax error trying to parse
	 as a declaration or function definition.  */
    default:
    decl_or_fndef:
      /* A declaration or a function definition.  We can only tell
	 which after parsing the declaration specifiers, if any, and
	 the first declarator.  */
      /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
      c_parser_declaration_or_fndef (parser, true, true, false, true, NULL);
      break;
    }
}
/* Parse a declaration or function definition (C90 6.5, 6.7.1, C99
6.7, 6.9.1). If FNDEF_OK is true, a function definition is
accepted; otherwise (old-style parameter declarations) only other
declarations are accepted. If NESTED is true, we are inside a
function or parsing old-style parameter declarations; any functions
encountered are nested functions and declaration specifiers are
required; otherwise we are at top level and functions are normal
functions and declaration specifiers may be optional. If EMPTY_OK
is true, empty declarations are OK (subject to all other
constraints); otherwise (old-style parameter declarations) they are
diagnosed. If START_ATTR_OK is true, the declaration specifiers
may start with attributes; otherwise they may not.
declaration:
declaration-specifiers init-declarator-list[opt] ;
function-definition:
declaration-specifiers[opt] declarator declaration-list[opt]
compound-statement
declaration-list:
declaration
declaration-list declaration
init-declarator-list:
init-declarator
init-declarator-list , init-declarator
init-declarator:
declarator simple-asm-expr[opt] attributes[opt]
declarator simple-asm-expr[opt] attributes[opt] = initializer
GNU extensions:
nested-function-definition:
declaration-specifiers declarator declaration-list[opt]
compound-statement
The simple-asm-expr and attributes are GNU extensions.
This function does not handle __extension__; that is handled in its
callers. ??? Following the old parser, __extension__ may start
external declarations, declarations in functions and declarations
at the start of "for" loops, but not old-style parameter
declarations.
C99 requires declaration specifiers in a function definition; the
absence is diagnosed through the diagnosis of implicit int. In GNU
C we also allow but diagnose declarations without declaration
specifiers, but only at top level (elsewhere they conflict with
other syntax).
OpenMP:
declaration:
threadprivate-directive */
static void
c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok, bool empty_ok,
/* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
bool nested, bool start_attr_ok, tree *foreach_elem)
{
struct c_declspecs *specs;
tree prefix_attrs;
tree all_prefix_attrs;
bool diagnosed_no_specs = false;
specs = build_null_declspecs ();
c_parser_declspecs (parser, specs, true, true, start_attr_ok);
if (parser->error)
{
c_parser_skip_to_end_of_block_or_statement (parser);
return;
}
if (nested && !specs->declspecs_seen_p)
{
c_parser_error (parser, "expected declaration specifiers");
c_parser_skip_to_end_of_block_or_statement (parser);
return;
}
finish_declspecs (specs);
if (c_parser_next_token_is (parser, CPP_SEMICOLON))
{
if (empty_ok)
shadow_tag (specs);
else
{
shadow_tag_warned (specs, 1);
pedwarn ("empty declaration");
}
c_parser_consume_token (parser);
return;
}
/* APPLE LOCAL begin radar 4548636 - class attributes. */
else if (c_parser_next_token_is_keyword (parser, RID_AT_INTERFACE)
|| c_parser_next_token_is_keyword (parser, RID_AT_IMPLEMENTATION))
{
gcc_assert (c_dialect_objc ());
if (!specs->declspecs_seen_p || specs->attrs == NULL_TREE
|| specs->type_seen_p || specs->non_sc_seen_p)
c_parser_error (parser, "no type or storage class may be specified here");
c_parser_objc_class_definition (parser, specs->attrs);
return;
}
/* APPLE LOCAL end radar 4548636 - class attributes. */
/* APPLE LOCAL begin radar 4947311 - protocol attributes */
else if (c_parser_next_token_is_keyword (parser, RID_AT_PROTOCOL))
{
gcc_assert (c_dialect_objc ());
if (!specs->declspecs_seen_p || specs->attrs == NULL_TREE
|| specs->type_seen_p || specs->non_sc_seen_p)
c_parser_error (parser, "no type or storage class may be specified here");
c_parser_objc_protocol_definition (parser, specs->attrs);
return;
}
/* APPLE LOCAL end radar 4947311 - protocol attributes */
pending_xref_error ();
prefix_attrs = specs->attrs;
all_prefix_attrs = prefix_attrs;
specs->attrs = NULL_TREE;
while (true)
{
struct c_declarator *declarator;
bool dummy = false;
tree fnbody;
/* Declaring either one or more declarators (in which case we
should diagnose if there were no declaration specifiers) or a
function definition (in which case the diagnostic for
implicit int suffices). */
declarator = c_parser_declarator (parser, specs->type_seen_p,
C_DTR_NORMAL, &dummy);
if (declarator == NULL)
{
c_parser_skip_to_end_of_block_or_statement (parser);
return;
}
if (c_parser_next_token_is (parser, CPP_EQ)
|| c_parser_next_token_is (parser, CPP_COMMA)
|| c_parser_next_token_is (parser, CPP_SEMICOLON)
|| c_parser_next_token_is_keyword (parser, RID_ASM)
/* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
|| c_parser_next_token_is_keyword (parser, RID_IN)
|| c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
{
tree asm_name = NULL_TREE;
tree postfix_attrs = NULL_TREE;
if (!diagnosed_no_specs && !specs->declspecs_seen_p)
{
diagnosed_no_specs = true;
pedwarn ("data definition has no type or storage class");
}
/* Having seen a data definition, there cannot now be a
function definition. */
fndef_ok = false;
if (c_parser_next_token_is_keyword (parser, RID_ASM))
asm_name = c_parser_simple_asm_expr (parser);
if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
postfix_attrs = c_parser_attributes (parser);
/* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
if (c_parser_next_token_is_keyword (parser, RID_IN))
{
gcc_assert (foreach_elem);
*foreach_elem = start_decl (declarator, specs, true,
chainon (postfix_attrs, all_prefix_attrs));
if (!*foreach_elem)
*foreach_elem = error_mark_node;
start_init (*foreach_elem, asm_name, global_bindings_p ());
return;
}
/* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
if (c_parser_next_token_is (parser, CPP_EQ))
{
tree d;
struct c_expr init;
c_parser_consume_token (parser);
/* The declaration of the variable is in effect while
its initializer is parsed. */
d = start_decl (declarator, specs, true,
chainon (postfix_attrs, all_prefix_attrs));
if (!d)
d = error_mark_node;
start_init (d, asm_name, global_bindings_p ());
init = c_parser_initializer (parser);
finish_init ();
if (d != error_mark_node)
{
maybe_warn_string_init (TREE_TYPE (d), init);
finish_decl (d, init.value, asm_name);
}
}
else
{
tree d = start_decl (declarator, specs, false,
chainon (postfix_attrs,
all_prefix_attrs));
if (d)
finish_decl (d, NULL_TREE, asm_name);
}
if (c_parser_next_token_is (parser, CPP_COMMA))
{
c_parser_consume_token (parser);
if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
all_prefix_attrs = chainon (c_parser_attributes (parser),
prefix_attrs);
else
all_prefix_attrs = prefix_attrs;
continue;
}
else if (c_parser_next_token_is (parser, CPP_SEMICOLON))
{
c_parser_consume_token (parser);
return;
}
else
{
c_parser_error (parser, "expected %<,%> or %<;%>");
c_parser_skip_to_end_of_block_or_statement (parser);
return;
}
}
else if (!fndef_ok)
{
c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, "
"%<asm%> or %<__attribute__%>");
c_parser_skip_to_end_of_block_or_statement (parser);
return;
}
/* Function definition (nested or otherwise). */
if (nested)
{
if (pedantic)
pedwarn ("ISO C forbids nested functions");
/* APPLE LOCAL begin nested functions 4258406 4357979 (in 4.2 m) */
else if (flag_nested_functions == 0)
error ("nested functions are disabled, use -fnested-functions to re-enable");
/* APPLE LOCAL end nested functions 4258406 4357979 (in 4.2 m) */
push_function_context ();
}
if (!start_function (specs, declarator, all_prefix_attrs))
{
/* This can appear in many cases looking nothing like a
function definition, so we don't give a more specific
error suggesting there was one. */
c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, %<asm%> "
"or %<__attribute__%>");
if (nested)
pop_function_context ();
break;
}
/* Parse old-style parameter declarations. ??? Attributes are
not allowed to start declaration specifiers here because of a
syntax conflict between a function declaration with attribute
suffix and a function definition with an attribute prefix on
first old-style parameter declaration. Following the old
parser, they are not accepted on subsequent old-style
parameter declarations either. However, there is no
ambiguity after the first declaration, nor indeed on the
first as long as we don't allow postfix attributes after a
declarator with a nonempty identifier list in a definition;
and postfix attributes have never been accepted here in
function definitions either. */
while (c_parser_next_token_is_not (parser, CPP_EOF)
&& c_parser_next_token_is_not (parser, CPP_OPEN_BRACE))
/* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
c_parser_declaration_or_fndef (parser, false, false, true, false, NULL);
DECL_SOURCE_LOCATION (current_function_decl)
= c_parser_peek_token (parser)->location;
store_parm_decls ();
fnbody = c_parser_compound_statement (parser);
if (nested)
{
tree decl = current_function_decl;
add_stmt (fnbody);
finish_function ();
pop_function_context ();
add_stmt (build_stmt (DECL_EXPR, decl));
}
else
{
add_stmt (fnbody);
finish_function ();
}
break;
}
}
/* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
/* This routine finishes up parsing of objc's foreach loop header. */
static tree
finish_parse_foreach_header (c_parser *parser, tree foreach_elem_selector)
{
  int saved_isoc99 = flag_isoc99;
  tree header;

  gcc_assert (foreach_elem_selector);
  /* Eat the 'in' keyword that separates the element from the
     collection expression.  */
  c_parser_consume_token (parser);
  /* Pair the already-parsed element selector with the collection
     expression in a TREE_LIST.  */
  header = build_tree_list (foreach_elem_selector,
			    c_parser_initializer (parser).value);
  finish_init ();
  /* check_for_loop_decls rejects for-loop declarations outside C99;
     force C99 mode on around the call, then restore it.  */
  flag_isoc99 = 1;
  check_for_loop_decls ();
  flag_isoc99 = saved_isoc99;
  return header;
}
/* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
/* Parse an asm-definition (asm() outside a function body). This is a
GNU extension.
asm-definition:
simple-asm-expr ;
*/
static void
c_parser_asm_definition (c_parser *parser)
{
  /* APPLE LOCAL begin CW asm blocks */
  tree str;

  /* When no '(' follows the asm keyword, this is not a file-scope
     asm() but an asm used as a declaration specifier.  */
  if (c_parser_peek_2nd_token (parser)->type != CPP_OPEN_PAREN)
    {
      c_parser_declaration_or_fndef (parser, true, true, false, true, NULL);
      return;
    }
  str = c_parser_simple_asm_expr (parser);
  /* APPLE LOCAL end CW asm blocks */
  if (str)
    cgraph_add_asm_node (str);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* Parse some declaration specifiers (possibly none) (C90 6.5, C99
6.7), adding them to SPECS (which may already include some).
Storage class specifiers are accepted iff SCSPEC_OK; type
specifiers are accepted iff TYPESPEC_OK; attributes are accepted at
the start iff START_ATTR_OK.
declaration-specifiers:
storage-class-specifier declaration-specifiers[opt]
type-specifier declaration-specifiers[opt]
type-qualifier declaration-specifiers[opt]
function-specifier declaration-specifiers[opt]
Function specifiers (inline) are from C99, and are currently
handled as storage class specifiers, as is __thread.
C90 6.5.1, C99 6.7.1:
storage-class-specifier:
typedef
extern
static
auto
register
C99 6.7.4:
function-specifier:
inline
C90 6.5.2, C99 6.7.2:
type-specifier:
void
char
short
int
long
float
double
signed
unsigned
_Bool
_Complex
[_Imaginary removed in C99 TC2]
struct-or-union-specifier
enum-specifier
typedef-name
(_Bool and _Complex are new in C99.)
C90 6.5.3, C99 6.7.3:
type-qualifier:
const
restrict
volatile
(restrict is new in C99.)
GNU extensions:
declaration-specifiers:
attributes declaration-specifiers[opt]
storage-class-specifier:
__thread
type-specifier:
typeof-specifier
_Decimal32
_Decimal64
_Decimal128
Objective-C:
type-specifier:
class-name objc-protocol-refs[opt]
typedef-name objc-protocol-refs
objc-protocol-refs
*/
/* Accumulate declaration specifiers into *SPECS, looping until a
   token that cannot continue the specifier sequence is seen.
   SCSPEC_OK / TYPESPEC_OK / START_ATTR_OK gate which categories of
   specifier are accepted; tokens outside the accepted set terminate
   the loop without being consumed.  */
static void
c_parser_declspecs (c_parser *parser, struct c_declspecs *specs,
		    bool scspec_ok, bool typespec_ok, bool start_attr_ok)
{
  bool attrs_ok = start_attr_ok;
  /* True once a type specifier has been seen, either before entry
     (recorded in SPECS) or within this loop; a plain identifier can
     only be taken as a typedef/class name before that point.  */
  bool seen_type = specs->type_seen_p;
  while (c_parser_next_token_is (parser, CPP_NAME)
	 || c_parser_next_token_is (parser, CPP_KEYWORD)
	 || (c_dialect_objc () && c_parser_next_token_is (parser, CPP_LESS)))
    {
      struct c_typespec t;
      tree attrs;
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  tree value = c_parser_peek_token (parser)->value;
	  c_id_kind kind = c_parser_peek_token (parser)->id_kind;
	  /* This finishes the specifiers unless a type name is OK, it
	     is declared as a type name and a type name hasn't yet
	     been seen.  */
	  if (!typespec_ok || seen_type
	      || (kind != C_ID_TYPENAME && kind != C_ID_CLASSNAME))
	    break;
	  c_parser_consume_token (parser);
	  seen_type = true;
	  attrs_ok = true;
	  if (kind == C_ID_TYPENAME
	      && (!c_dialect_objc ()
		  || c_parser_next_token_is_not (parser, CPP_LESS)))
	    {
	      t.kind = ctsk_typedef;
	      /* For a typedef name, record the meaning, not the name.
		 In case of 'foo foo, bar;'.  */
	      t.spec = lookup_name (value);
	    }
	  else
	    {
	      /* Objective-C class name, possibly qualified by a
		 protocol reference list in angle brackets.  */
	      tree proto = NULL_TREE;
	      gcc_assert (c_dialect_objc ());
	      t.kind = ctsk_objc;
	      if (c_parser_next_token_is (parser, CPP_LESS))
		proto = c_parser_objc_protocol_refs (parser);
	      t.spec = objc_get_protocol_qualified_type (value, proto);
	    }
	  declspecs_add_type (specs, t);
	  continue;
	}
      if (c_parser_next_token_is (parser, CPP_LESS))
	{
	  /* Make "<SomeProtocol>" equivalent to "id <SomeProtocol>" -
	     nisse@lysator.liu.se.  */
	  tree proto;
	  gcc_assert (c_dialect_objc ());
	  if (!typespec_ok || seen_type)
	    break;
	  proto = c_parser_objc_protocol_refs (parser);
	  t.kind = ctsk_objc;
	  t.spec = objc_get_protocol_qualified_type (NULL_TREE, proto);
	  declspecs_add_type (specs, t);
	  continue;
	}
      gcc_assert (c_parser_next_token_is (parser, CPP_KEYWORD));
      switch (c_parser_peek_token (parser)->keyword)
	{
	  /* APPLE LOCAL begin CW asm blocks (in 4.2 ac) */
	case RID_ASM:
	  /* A rough estimate is that the storage class asm requires
	     that '(' not immediately follow the asm.  */
	  if (current_function_decl == NULL_TREE
	      && c_parser_peek_2nd_token (parser)->type == CPP_OPEN_PAREN)
	    goto out;
	  /* Otherwise deliberately fall through: asm is handled like
	     a storage class specifier.  */
	  /* APPLE LOCAL end CW asm blocks (in 4.2 ac) */
	case RID_STATIC:
	case RID_EXTERN:
	case RID_REGISTER:
	case RID_TYPEDEF:
	case RID_INLINE:
	case RID_AUTO:
	case RID_THREAD:
	  /* APPLE LOCAL private extern (in 4.2 aa) */
	case RID_PRIVATE_EXTERN:
	  if (!scspec_ok)
	    goto out;
	  attrs_ok = true;
	  /* TODO: Distinguish between function specifiers (inline)
	     and storage class specifiers, either here or in
	     declspecs_add_scspec.  */
	  declspecs_add_scspec (specs, c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	  break;
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_SHORT:
	case RID_SIGNED:
	case RID_COMPLEX:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	case RID_BOOL:
	  /* Simple reserved-word type specifier.  */
	  if (!typespec_ok)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  OBJC_NEED_RAW_IDENTIFIER (1);
	  t.kind = ctsk_resword;
	  t.spec = c_parser_peek_token (parser)->value;
	  declspecs_add_type (specs, t);
	  c_parser_consume_token (parser);
	  break;
	case RID_ENUM:
	  if (!typespec_ok)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  t = c_parser_enum_specifier (parser);
	  declspecs_add_type (specs, t);
	  break;
	case RID_STRUCT:
	case RID_UNION:
	  if (!typespec_ok)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  t = c_parser_struct_or_union_specifier (parser);
	  declspecs_add_type (specs, t);
	  break;
	case RID_TYPEOF:
	  /* ??? The old parser rejected typeof after other type
	     specifiers, but is a syntax error the best way of
	     handling this?  */
	  if (!typespec_ok || seen_type)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  t = c_parser_typeof_specifier (parser);
	  declspecs_add_type (specs, t);
	  break;
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	  /* Type qualifiers are always accepted here.  */
	  attrs_ok = true;
	  declspecs_add_qual (specs, c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	  break;
	case RID_ATTRIBUTE:
	  if (!attrs_ok)
	    goto out;
	  attrs = c_parser_attributes (parser);
	  declspecs_add_attrs (specs, attrs);
	  break;
	default:
	  goto out;
	}
    }
 out: ;
}
/* Parse an enum specifier (C90 6.5.2.2, C99 6.7.2.2).
enum-specifier:
enum attributes[opt] identifier[opt] { enumerator-list } attributes[opt]
enum attributes[opt] identifier[opt] { enumerator-list , } attributes[opt]
enum attributes[opt] identifier
The form with trailing comma is new in C99. The forms with
attributes are GNU extensions. In GNU C, we accept any expression
without commas in the syntax (assignment expressions, not just
conditional expressions); assignment expressions will be diagnosed
as non-constant.
enumerator-list:
enumerator
enumerator-list , enumerator
enumerator:
enumeration-constant
enumeration-constant = constant-expression
*/
/* Parse an enum specifier; on entry the next token is the 'enum'
   keyword.  Handles a full definition (brace-enclosed enumerator
   list), a reference to a named enum, and the error case of neither
   braces nor a tag name.  */
static struct c_typespec
c_parser_enum_specifier (c_parser *parser)
{
  struct c_typespec ret;
  tree attrs;
  tree ident = NULL_TREE;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ENUM));
  c_parser_consume_token (parser);
  attrs = c_parser_attributes (parser);
  /* An optional tag name may follow the attributes.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      ident = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      /* Parse an enum definition.  */
      tree type = start_enum (ident);
      tree postfix_attrs;
      /* We chain the enumerators in reverse order, then put them in
	 forward order at the end.  */
      tree values = NULL_TREE;
      c_parser_consume_token (parser);
      while (true)
	{
	  tree enum_id;
	  tree enum_value;
	  tree enum_decl;
	  bool seen_comma;
	  /* Each enumerator must start with an identifier; on error,
	     skip to the closing brace and mark the list invalid.  */
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      values = error_mark_node;
	      break;
	    }
	  enum_id = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  /* An optional '= constant-expression' supplies an explicit
	     value; NULL_TREE lets build_enumerator pick the next.  */
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      c_parser_consume_token (parser);
	      enum_value = c_parser_expr_no_commas (parser, NULL).value;
	    }
	  else
	    enum_value = NULL_TREE;
	  enum_decl = build_enumerator (enum_id, enum_value);
	  TREE_CHAIN (enum_decl) = values;
	  values = enum_decl;
	  seen_comma = false;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      seen_comma = true;
	      c_parser_consume_token (parser);
	    }
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      /* A trailing comma is valid in C99 but pedwarns in
		 C90 pedantic mode.  */
	      if (seen_comma && pedantic && !flag_isoc99)
		pedwarn ("comma at end of enumerator list");
	      c_parser_consume_token (parser);
	      break;
	    }
	  if (!seen_comma)
	    {
	      c_parser_error (parser, "expected %<,%> or %<}%>");
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      values = error_mark_node;
	      break;
	    }
	}
      postfix_attrs = c_parser_attributes (parser);
      ret.spec = finish_enum (type, nreverse (values),
			      chainon (attrs, postfix_attrs));
      ret.kind = ctsk_tagdef;
      return ret;
    }
  else if (!ident)
    {
      /* Neither a definition nor a tag reference: syntax error.  */
      c_parser_error (parser, "expected %<{%>");
      ret.spec = error_mark_node;
      ret.kind = ctsk_tagref;
      return ret;
    }
  /* A reference to a named (possibly not yet defined) enum.  */
  ret = parser_xref_tag (ENUMERAL_TYPE, ident);
  /* In ISO C, enumerated types can be referred to only if already
     defined.  */
  if (pedantic && !COMPLETE_TYPE_P (ret.spec))
    pedwarn ("ISO C forbids forward references to %<enum%> types");
  return ret;
}
/* Parse a struct or union specifier (C90 6.5.2.1, C99 6.7.2.1).
struct-or-union-specifier:
struct-or-union attributes[opt] identifier[opt]
{ struct-contents } attributes[opt]
struct-or-union attributes[opt] identifier
struct-contents:
struct-declaration-list
struct-declaration-list:
struct-declaration ;
struct-declaration-list struct-declaration ;
GNU extensions:
struct-contents:
empty
struct-declaration
struct-declaration-list struct-declaration
struct-declaration-list:
struct-declaration-list ;
;
(Note that in the syntax here, unlike that in ISO C, the semicolons
are included here rather than in struct-declaration, in order to
describe the syntax with extra semicolons and missing semicolon at
end.)
Objective-C:
struct-declaration-list:
@defs ( class-name )
(Note this does not include a trailing semicolon, but can be
followed by further declarations, and gets a pedwarn-if-pedantic
when followed by a semicolon.) */
/* Parse a struct or union specifier; on entry the next token is the
   'struct' or 'union' keyword.  Handles a full definition (including
   the Objective-C @defs construct), a tag reference, and the error
   case of neither braces nor a tag name.  */
static struct c_typespec
c_parser_struct_or_union_specifier (c_parser *parser)
{
  struct c_typespec ret;
  tree attrs;
  tree ident = NULL_TREE;
  enum tree_code code;
  /* Map the keyword to the corresponding tree code.  */
  switch (c_parser_peek_token (parser)->keyword)
    {
    case RID_STRUCT:
      code = RECORD_TYPE;
      break;
    case RID_UNION:
      code = UNION_TYPE;
      break;
    default:
      gcc_unreachable ();
    }
  c_parser_consume_token (parser);
  attrs = c_parser_attributes (parser);
  /* An optional tag name may follow the attributes.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      ident = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      /* Parse a struct or union definition.  Start the scope of the
	 tag before parsing components.  */
      tree type = start_struct (code, ident);
      tree postfix_attrs;
      /* We chain the components in reverse order, then put them in
	 forward order at the end.  Each struct-declaration may
	 declare multiple components (comma-separated), so we must use
	 chainon to join them, although when parsing each
	 struct-declaration we can use TREE_CHAIN directly.
	 The theory behind all this is that there will be more
	 semicolon separated fields than comma separated fields, and
	 so we'll be minimizing the number of node traversals required
	 by chainon.  */
      tree contents = NULL_TREE;
      c_parser_consume_token (parser);
      /* Handle the Objective-C @defs construct,
	 e.g. foo(sizeof(struct{ @defs(ClassName) }));.  */
      if (c_parser_next_token_is_keyword (parser, RID_AT_DEFS))
	{
	  tree name;
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    goto end_at_defs;
	  if (c_parser_next_token_is (parser, CPP_NAME)
	      && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)
	    {
	      name = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      c_parser_error (parser, "expected class name");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      goto end_at_defs;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  /* The class's instance variables become this struct's
	     field list.  */
	  contents = nreverse (objc_get_class_ivars (name));
	  /* APPLE LOCAL begin radar 4441551 */
	  if (flag_objc2_check && flag_objc_abi == 1)
	    warning (0, "@defs will not be supported in future");
	  /* APPLE LOCAL radar 4705250 */
	  else if (flag_objc_abi == 2 && flag_objc_atdefs != 1)
	    error ("@defs is not supported in new abi");
	  /* APPLE LOCAL end radar 4441551 */
	}
    end_at_defs:
      /* Parse the struct-declarations and semicolons.  Problems with
	 semicolons are diagnosed here; empty structures are diagnosed
	 elsewhere.  */
      while (true)
	{
	  tree decls;
	  /* Parse any stray semicolon.  */
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      if (pedantic)
		pedwarn ("extra semicolon in struct or union specified");
	      c_parser_consume_token (parser);
	      continue;
	    }
	  /* Stop if at the end of the struct or union contents.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      c_parser_consume_token (parser);
	      break;
	    }
	  /* Accept #pragmas at struct scope.  */
	  if (c_parser_next_token_is (parser, CPP_PRAGMA))
	    {
	      c_parser_pragma (parser, pragma_external);
	      continue;
	    }
	  /* Parse some comma-separated declarations, but not the
	     trailing semicolon if any.  */
	  decls = c_parser_struct_declaration (parser);
	  contents = chainon (decls, contents);
	  /* If no semicolon follows, either we have a parse error or
	     are at the end of the struct or union and should
	     pedwarn.  */
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    c_parser_consume_token (parser);
	  else
	    {
	      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
		pedwarn ("no semicolon at end of struct or union");
	      else
		{
		  c_parser_error (parser, "expected %<;%>");
		  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
		  break;
		}
	    }
	}
      postfix_attrs = c_parser_attributes (parser);
      ret.spec = finish_struct (type, nreverse (contents),
				chainon (attrs, postfix_attrs));
      ret.kind = ctsk_tagdef;
      return ret;
    }
  else if (!ident)
    {
      /* Neither a definition nor a tag reference: syntax error.  */
      c_parser_error (parser, "expected %<{%>");
      ret.spec = error_mark_node;
      ret.kind = ctsk_tagref;
      return ret;
    }
  /* A reference to a named (possibly incomplete) struct or union.  */
  ret = parser_xref_tag (code, ident);
  return ret;
}
/* Parse a struct-declaration (C90 6.5.2.1, C99 6.7.2.1), *without*
the trailing semicolon.
struct-declaration:
specifier-qualifier-list struct-declarator-list
specifier-qualifier-list:
type-specifier specifier-qualifier-list[opt]
type-qualifier specifier-qualifier-list[opt]
attributes specifier-qualifier-list[opt]
struct-declarator-list:
struct-declarator
struct-declarator-list , attributes[opt] struct-declarator
struct-declarator:
declarator attributes[opt]
declarator[opt] : constant-expression attributes[opt]
GNU extensions:
struct-declaration:
__extension__ struct-declaration
specifier-qualifier-list
Unlike the ISO C syntax, semicolons are handled elsewhere. The use
of attributes where shown is a GNU extension. In GNU C, we accept
any expression without commas in the syntax (assignment
expressions, not just conditional expressions); assignment
expressions will be diagnosed as non-constant. */
/* Parse one struct-declaration (a specifier-qualifier-list followed
   by a comma-separated struct-declarator-list), without the trailing
   semicolon.  Returns the chain of member FIELD_DECLs (linked via
   TREE_CHAIN, in reverse declaration order within this declaration),
   or NULL_TREE on error or for a memberless declaration.  */
static tree
c_parser_struct_declaration (c_parser *parser)
{
  struct c_declspecs *specs;
  tree prefix_attrs;
  tree all_prefix_attrs;
  tree decls;
  /* '__extension__ struct-declaration': recurse with extension
     diagnostics suppressed, then restore them.  */
  if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
    {
      int ext;
      tree decl;
      ext = disable_extension_diagnostics ();
      c_parser_consume_token (parser);
      decl = c_parser_struct_declaration (parser);
      restore_extension_diagnostics (ext);
      return decl;
    }
  specs = build_null_declspecs ();
  /* Storage class specifiers are not permitted on members.  */
  c_parser_declspecs (parser, specs, false, true, true);
  if (parser->error)
    return NULL_TREE;
  if (!specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected specifier-qualifier-list");
      return NULL_TREE;
    }
  finish_declspecs (specs);
  /* Specifiers directly followed by ';' declare no declarators: either
     a tag declaration or a GNU/MS unnamed struct/union member.  */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      tree ret;
      if (!specs->type_seen_p)
	{
	  if (pedantic)
	    pedwarn ("ISO C forbids member declarations with no members");
	  shadow_tag_warned (specs, pedantic);
	  ret = NULL_TREE;
	}
      else
	{
	  /* Support for unnamed structs or unions as members of
	     structs or unions (which is [a] useful and [b] supports
	     MS P-SDK).  */
	  ret = grokfield (build_id_declarator (NULL_TREE), specs, NULL_TREE);
	}
      return ret;
    }
  pending_xref_error ();
  prefix_attrs = specs->attrs;
  all_prefix_attrs = prefix_attrs;
  specs->attrs = NULL_TREE;
  decls = NULL_TREE;
  while (true)
    {
      /* Declaring one or more declarators or un-named bit-fields.  */
      struct c_declarator *declarator;
      bool dummy = false;
      /* A leading ':' means an unnamed bit-field.  */
      if (c_parser_next_token_is (parser, CPP_COLON))
	declarator = build_id_declarator (NULL_TREE);
      else
	declarator = c_parser_declarator (parser, specs->type_seen_p,
					  C_DTR_NORMAL, &dummy);
      if (declarator == NULL)
	{
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  break;
	}
      if (c_parser_next_token_is (parser, CPP_COLON)
	  || c_parser_next_token_is (parser, CPP_COMMA)
	  || c_parser_next_token_is (parser, CPP_SEMICOLON)
	  || c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
	  || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	{
	  tree postfix_attrs = NULL_TREE;
	  tree width = NULL_TREE;
	  tree d;
	  /* Optional bit-field width.  */
	  if (c_parser_next_token_is (parser, CPP_COLON))
	    {
	      c_parser_consume_token (parser);
	      width = c_parser_expr_no_commas (parser, NULL).value;
	    }
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    postfix_attrs = c_parser_attributes (parser);
	  d = grokfield (declarator, specs, width);
	  decl_attributes (&d, chainon (postfix_attrs,
					all_prefix_attrs), 0);
	  /* Chain the new member onto the front of the result list.  */
	  TREE_CHAIN (d) = decls;
	  decls = d;
	  /* Attributes after a comma apply to the following
	     declarator in addition to the prefix attributes.  */
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    all_prefix_attrs = chainon (c_parser_attributes (parser),
					prefix_attrs);
	  else
	    all_prefix_attrs = prefix_attrs;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else if (c_parser_next_token_is (parser, CPP_SEMICOLON)
		   || c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      /* Semicolon consumed in caller.  */
	      break;
	    }
	  else
	    {
	      c_parser_error (parser, "expected %<,%>, %<;%> or %<}%>");
	      break;
	    }
	}
      else
	{
	  c_parser_error (parser,
			  "expected %<:%>, %<,%>, %<;%>, %<}%> or "
			  "%<__attribute__%>");
	  break;
	}
    }
  return decls;
}
/* Parse a typeof specifier (a GNU extension).
typeof-specifier:
typeof ( expression )
typeof ( type-name )
*/
/* Parse a typeof specifier; on entry the next token is the 'typeof'
   keyword.  The parenthesized operand may be either a type name or an
   expression; in the latter case the expression is parsed unevaluated
   (skip_evaluation is incremented around the parse and decremented on
   every exit path).  On error ret.spec is error_mark_node.  */
static struct c_typespec
c_parser_typeof_specifier (c_parser *parser)
{
  struct c_typespec ret;
  ret.kind = ctsk_typeof;
  ret.spec = error_mark_node;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_TYPEOF));
  c_parser_consume_token (parser);
  /* The operand of typeof is not evaluated.  */
  skip_evaluation++;
  in_typeof++;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      skip_evaluation--;
      in_typeof--;
      return ret;
    }
  if (c_parser_next_token_starts_typename (parser))
    {
      /* typeof ( type-name ) */
      struct c_type_name *type = c_parser_type_name (parser);
      skip_evaluation--;
      in_typeof--;
      if (type != NULL)
	{
	  ret.spec = groktypename (type);
	  pop_maybe_used (variably_modified_type_p (ret.spec, NULL_TREE));
	}
    }
  else
    {
      /* typeof ( expression ) */
      bool was_vm;
      struct c_expr expr = c_parser_expression (parser);
      skip_evaluation--;
      in_typeof--;
      if (TREE_CODE (expr.value) == COMPONENT_REF
	  && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1)))
	error ("%<typeof%> applied to a bit-field");
      ret.spec = TREE_TYPE (expr.value);
      /* APPLE LOCAL begin radar 4204796 (in 4.2 n) */
      /* Strip the volatile qualifier that was added when the operand
	 was "volatilized" for Objective-C.  */
      if (c_dialect_objc()
	  && lookup_attribute ("objc_volatilized", TYPE_ATTRIBUTES (ret.spec)))
	ret.spec = build_qualified_type
	  (ret.spec, (TYPE_QUALS (ret.spec) & ~TYPE_QUAL_VOLATILE));
      /* APPLE LOCAL end radar 4204796 (in 4.2 n) */
      was_vm = variably_modified_type_p (ret.spec, NULL_TREE);
      /* This should be returned with the type so that when the type
	 is evaluated, this can be evaluated.  For now, we avoid
	 evaluation when the context might.  */
      if (!skip_evaluation && was_vm)
	{
	  tree e = expr.value;
	  /* If the expression is not of a type to which we cannot assign a line
	     number, wrap the thing in a no-op NOP_EXPR.  */
	  if (DECL_P (e) || CONSTANT_CLASS_P (e))
	    e = build1 (NOP_EXPR, void_type_node, e);
	  if (EXPR_P (e))
	    SET_EXPR_LOCATION (e, input_location);
	  add_stmt (e);
	}
      pop_maybe_used (was_vm);
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return ret;
}
/* Parse a declarator, possibly an abstract declarator (C90 6.5.4,
6.5.5, C99 6.7.5, 6.7.6). If TYPE_SEEN_P then a typedef name may
be redeclared; otherwise it may not. KIND indicates which kind of
declarator is wanted. Returns a valid declarator except in the
case of a syntax error in which case NULL is returned. *SEEN_ID is
set to true if an identifier being declared is seen; this is used
to diagnose bad forms of abstract array declarators and to
determine whether an identifier list is syntactically permitted.
declarator:
pointer[opt] direct-declarator
direct-declarator:
identifier
( attributes[opt] declarator )
direct-declarator array-declarator
direct-declarator ( parameter-type-list )
direct-declarator ( identifier-list[opt] )
pointer:
* type-qualifier-list[opt]
* type-qualifier-list[opt] pointer
type-qualifier-list:
type-qualifier
attributes
type-qualifier-list type-qualifier
type-qualifier-list attributes
parameter-type-list:
parameter-list
parameter-list , ...
parameter-list:
parameter-declaration
parameter-list , parameter-declaration
parameter-declaration:
declaration-specifiers declarator attributes[opt]
declaration-specifiers abstract-declarator[opt] attributes[opt]
identifier-list:
identifier
identifier-list , identifier
abstract-declarator:
pointer
pointer[opt] direct-abstract-declarator
direct-abstract-declarator:
( attributes[opt] abstract-declarator )
direct-abstract-declarator[opt] array-declarator
direct-abstract-declarator[opt] ( parameter-type-list[opt] )
GNU extensions:
direct-declarator:
direct-declarator ( parameter-forward-declarations
parameter-type-list[opt] )
direct-abstract-declarator:
direct-abstract-declarator[opt] ( parameter-forward-declarations
parameter-type-list[opt] )
parameter-forward-declarations:
parameter-list ;
parameter-forward-declarations parameter-list ;
The uses of attributes shown above are GNU extensions.
Some forms of array declarator are not included in C99 in the
syntax for abstract declarators; these are disallowed elsewhere.
This may be a defect (DR#289).
This function also accepts an omitted abstract declarator as being
an abstract declarator, although not part of the formal syntax. */
static struct c_declarator *
c_parser_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
		     bool *seen_id)
{
  struct c_declspecs *ptr_quals;
  struct c_declarator *target;

  /* Without a leading '*' the whole declarator is a direct
     declarator (possibly an empty abstract one).  */
  if (!c_parser_next_token_is (parser, CPP_MULT))
    return c_parser_direct_declarator (parser, type_seen_p, kind, seen_id);

  /* Consume the '*' and any type-qualifier-list / attributes that
     follow it, then recursively parse the pointed-to declarator.  */
  c_parser_consume_token (parser);
  ptr_quals = build_null_declspecs ();
  c_parser_declspecs (parser, ptr_quals, false, false, true);
  target = c_parser_declarator (parser, type_seen_p, kind, seen_id);
  if (target == NULL)
    return NULL;
  return make_pointer_declarator (ptr_quals, target);
}
/* Parse a direct declarator or direct abstract declarator; arguments
as c_parser_declarator. */
/* Parse a direct declarator or direct abstract declarator; arguments
   as c_parser_declarator.  Dispatches on the first token: an
   identifier, a '[' (abstract array declarator), a '(' (either a
   parameter list or a parenthesized declarator), or nothing (an
   omitted abstract declarator).  */
static struct c_declarator *
c_parser_direct_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
			    bool *seen_id)
{
  /* The direct declarator must start with an identifier (possibly
     omitted) or a parenthesized declarator (possibly abstract).  In
     an ordinary declarator, initial parentheses must start a
     parenthesized declarator.  In an abstract declarator or parameter
     declarator, they could start a parenthesized declarator or a
     parameter list.  To tell which, the open parenthesis and any
     following attributes must be read.  If a declaration specifier
     follows, then it is a parameter list; if the specifier is a
     typedef name, there might be an ambiguity about redeclaring it,
     which is resolved in the direction of treating it as a typedef
     name.  If a close parenthesis follows, it is also an empty
     parameter list, as the syntax does not permit empty abstract
     declarators.  Otherwise, it is a parenthesized declarator (in
     which case the analysis may be repeated inside it, recursively).
     ??? There is an ambiguity in a parameter declaration "int
     (__attribute__((foo)) x)", where x is not a typedef name: it
     could be an abstract declarator for a function, or declare x with
     parentheses.  The proper resolution of this ambiguity needs
     documenting.  At present we follow an accident of the old
     parser's implementation, whereby the first parameter must have
     some declaration specifiers other than just attributes.  Thus as
     a parameter declaration it is treated as a parenthesized
     parameter named x, and as an abstract declarator it is
     rejected.
     ??? Also following the old parser, attributes inside an empty
     parameter list are ignored, making it a list not yielding a
     prototype, rather than giving an error or making it have one
     parameter with implicit type int.
     ??? Also following the old parser, typedef names may be
     redeclared in declarators, but not Objective-C class names.  */
  if (kind != C_DTR_ABSTRACT
      && c_parser_next_token_is (parser, CPP_NAME)
      && ((type_seen_p
	   /* APPLE LOCAL begin radar 4281748 */
	   && (c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME
	       || c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))
	  /* APPLE LOCAL end radar 4281748 */
	  || c_parser_peek_token (parser)->id_kind == C_ID_ID))
    {
      /* An identifier: the declared name.  */
      struct c_declarator *inner
	= build_id_declarator (c_parser_peek_token (parser)->value);
      *seen_id = true;
      inner->id_loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      return c_parser_direct_declarator_inner (parser, *seen_id, inner);
    }
  /* An abstract declarator may begin directly with '[': an array
     declarator with the identifier omitted.  */
  if (kind != C_DTR_NORMAL
      && c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
    {
      struct c_declarator *inner = build_id_declarator (NULL_TREE);
      return c_parser_direct_declarator_inner (parser, *seen_id, inner);
    }
  /* Either we are at the end of an abstract declarator, or we have
     parentheses.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      tree attrs;
      struct c_declarator *inner;
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      /* Declaration specifiers or ')' after '(' mean a parameter
	 list, not a parenthesized declarator (see the large comment
	 above for the disambiguation rules).  */
      if (kind != C_DTR_NORMAL
	  && (c_parser_next_token_starts_declspecs (parser)
	      || c_parser_next_token_is (parser, CPP_CLOSE_PAREN)))
	{
	  struct c_arg_info *args
	    = c_parser_parms_declarator (parser, kind == C_DTR_NORMAL,
					 attrs);
	  if (args == NULL)
	    return NULL;
	  else
	    {
	      inner
		= build_function_declarator (args,
					     build_id_declarator (NULL_TREE));
	      return c_parser_direct_declarator_inner (parser, *seen_id,
						       inner);
	    }
	}
      /* A parenthesized declarator.  */
      inner = c_parser_declarator (parser, type_seen_p, kind, seen_id);
      if (inner != NULL && attrs != NULL)
	inner = build_attrs_declarator (attrs, inner);
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  if (inner == NULL)
	    return NULL;
	  else
	    return c_parser_direct_declarator_inner (parser, *seen_id, inner);
	}
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return NULL;
	}
    }
  else
    {
      /* No identifier and no parentheses: valid only as an omitted
	 abstract declarator.  */
      if (kind == C_DTR_NORMAL)
	{
	  c_parser_error (parser, "expected identifier or %<(%>");
	  return NULL;
	}
      else
	return build_id_declarator (NULL_TREE);
    }
}
/* Parse part of a direct declarator or direct abstract declarator,
given that some (in INNER) has already been parsed; ID_PRESENT is
true if an identifier is present, false for an abstract
declarator. */
/* Parse part of a direct declarator or direct abstract declarator,
   given that some (in INNER) has already been parsed; ID_PRESENT is
   true if an identifier is present, false for an abstract
   declarator.  Consumes any trailing sequence of array declarators
   ('[' ... ']') and parameter lists ('(' ... ')') by recursing after
   each one; returns INNER unchanged when neither follows.  */
static struct c_declarator *
c_parser_direct_declarator_inner (c_parser *parser, bool id_present,
				  struct c_declarator *inner)
{
  /* Parse a sequence of array declarators and parameter lists.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
    {
      struct c_declarator *declarator;
      struct c_declspecs *quals_attrs = build_null_declspecs ();
      bool static_seen;
      bool star_seen;
      tree dimen;
      c_parser_consume_token (parser);
      /* Qualifiers may appear before and/or after 'static' inside the
	 brackets (C99 array parameter syntax).  */
      c_parser_declspecs (parser, quals_attrs, false, false, true);
      static_seen = c_parser_next_token_is_keyword (parser, RID_STATIC);
      if (static_seen)
	c_parser_consume_token (parser);
      if (static_seen && !quals_attrs->declspecs_seen_p)
	c_parser_declspecs (parser, quals_attrs, false, false, true);
      if (!quals_attrs->declspecs_seen_p)
	quals_attrs = NULL;
      /* If "static" is present, there must be an array dimension.
	 Otherwise, there may be a dimension, "*", or no
	 dimension.  */
      if (static_seen)
	{
	  star_seen = false;
	  dimen = c_parser_expr_no_commas (parser, NULL).value;
	}
      else
	{
	  if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
	    {
	      /* '[]': unspecified dimension.  */
	      dimen = NULL_TREE;
	      star_seen = false;
	    }
	  else if (c_parser_next_token_is (parser, CPP_MULT))
	    {
	      /* '[*]' only when the '*' is immediately followed by
		 ']'; otherwise the '*' begins a dimension
		 expression.  */
	      if (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_SQUARE)
		{
		  dimen = NULL_TREE;
		  star_seen = true;
		  c_parser_consume_token (parser);
		}
	      else
		{
		  star_seen = false;
		  dimen = c_parser_expr_no_commas (parser, NULL).value;
		}
	    }
	  else
	    {
	      star_seen = false;
	      dimen = c_parser_expr_no_commas (parser, NULL).value;
	    }
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
	c_parser_consume_token (parser);
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  return NULL;
	}
      declarator = build_array_declarator (dimen, quals_attrs, static_seen,
					   star_seen);
      if (declarator == NULL)
	return NULL;
      inner = set_array_declarator_inner (declarator, inner, !id_present);
      /* Recurse for any further array declarators or parameter
	 lists.  */
      return c_parser_direct_declarator_inner (parser, id_present, inner);
    }
  else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* A parameter list, making INNER a function declarator.  */
      tree attrs;
      struct c_arg_info *args;
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      args = c_parser_parms_declarator (parser, id_present, attrs);
      if (args == NULL)
	return NULL;
      else
	{
	  inner = build_function_declarator (args, inner);
	  return c_parser_direct_declarator_inner (parser, id_present, inner);
	}
    }
  return inner;
}
/* Parse a parameter list or identifier list, including the closing
parenthesis but not the opening one. ATTRS are the attributes at
the start of the list. ID_LIST_OK is true if an identifier list is
acceptable; such a list must not have attributes at the start.
Returns NULL after a parse error; every exit path balances the
push_scope below with a pop_scope.  */
static struct c_arg_info *
c_parser_parms_declarator (c_parser *parser, bool id_list_ok, tree attrs)
{
push_scope ();
declare_parm_level ();
/* If the list starts with an identifier, it is an identifier list.
Otherwise, it is either a prototype list or an empty list. */
if (id_list_ok
&& !attrs
&& c_parser_next_token_is (parser, CPP_NAME)
&& c_parser_peek_token (parser)->id_kind == C_ID_ID)
{
/* Old-style (K&R) identifier list: chain the names into a
TREE_LIST, appending through NEXTP.  */
tree list = NULL_TREE, *nextp = &list;
while (c_parser_next_token_is (parser, CPP_NAME)
&& c_parser_peek_token (parser)->id_kind == C_ID_ID)
{
*nextp = build_tree_list (NULL_TREE,
c_parser_peek_token (parser)->value);
nextp = & TREE_CHAIN (*nextp);
c_parser_consume_token (parser);
if (c_parser_next_token_is_not (parser, CPP_COMMA))
break;
c_parser_consume_token (parser);
if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
{
/* Diagnose a trailing comma, as in "f (a, b,)".  */
c_parser_error (parser, "expected identifier");
break;
}
}
if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
{
/* An identifier list carries only the name chain in TYPES; the
real parameter declarations come later, before the body.  */
struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info);
ret->parms = 0;
ret->tags = 0;
ret->types = list;
ret->others = 0;
ret->pending_sizes = 0;
ret->had_vla_unspec = 0;
c_parser_consume_token (parser);
pop_scope ();
return ret;
}
else
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
pop_scope ();
return NULL;
}
}
else
{
struct c_arg_info *ret = c_parser_parms_list_declarator (parser, attrs);
pop_scope ();
return ret;
}
}
/* Parse a parameter list (possibly empty), including the closing
parenthesis but not the opening one. ATTRS are the attributes at
the start of the list. */
static struct c_arg_info *
c_parser_parms_list_declarator (c_parser *parser, tree attrs)
{
bool good_parm = false;
/* ??? Following the old parser, forward parameter declarations may
use abstract declarators, and if no real parameter declarations
follow the forward declarations then this is not diagnosed. Also
note as above that attributes are ignored as the only contents of
the parentheses, or as the only contents after forward
declarations. */
if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
{
struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info);
ret->parms = 0;
ret->tags = 0;
ret->types = 0;
ret->others = 0;
ret->pending_sizes = 0;
ret->had_vla_unspec = 0;
c_parser_consume_token (parser);
return ret;
}
if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
{
struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info);
ret->parms = 0;
ret->tags = 0;
ret->others = 0;
ret->pending_sizes = 0;
ret->had_vla_unspec = 0;
/* Suppress -Wold-style-definition for this case. */
ret->types = error_mark_node;
error ("ISO C requires a named argument before %<...%>");
c_parser_consume_token (parser);
if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
{
c_parser_consume_token (parser);
return ret;
}
else
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
return NULL;
}
}
/* Nonempty list of parameters, either terminated with semicolon
(forward declarations; recurse) or with close parenthesis (normal
function) or with ", ... )" (variadic function). */
while (true)
{
/* Parse a parameter. */
struct c_parm *parm = c_parser_parameter_declaration (parser, attrs);
attrs = NULL_TREE;
if (parm != NULL)
{
good_parm = true;
push_parm_decl (parm);
}
if (c_parser_next_token_is (parser, CPP_SEMICOLON))
{
tree new_attrs;
c_parser_consume_token (parser);
mark_forward_parm_decls ();
new_attrs = c_parser_attributes (parser);
return c_parser_parms_list_declarator (parser, new_attrs);
}
if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
{
c_parser_consume_token (parser);
if (good_parm)
return get_parm_info (false);
else
{
struct c_arg_info *ret
= XOBNEW (&parser_obstack, struct c_arg_info);
ret->parms = 0;
ret->tags = 0;
ret->types = 0;
ret->others = 0;
ret->pending_sizes = 0;
ret->had_vla_unspec = 0;
return ret;
}
}
if (!c_parser_require (parser, CPP_COMMA,
"expected %<;%>, %<,%> or %<)%>"))
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
return NULL;
}
if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
{
c_parser_consume_token (parser);
if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
{
c_parser_consume_token (parser);
if (good_parm)
return get_parm_info (true);
else
{
struct c_arg_info *ret
= XOBNEW (&parser_obstack, struct c_arg_info);
ret->parms = 0;
ret->tags = 0;
ret->types = 0;
ret->others = 0;
ret->pending_sizes = 0;
ret->had_vla_unspec = 0;
return ret;
}
}
else
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
return NULL;
}
}
}
}
/* Parse a parameter declaration. ATTRS are the attributes at the
start of the declaration if it is the first parameter.
Returns NULL after a parse error, having skipped to the next comma
(or, when no declaration specifiers were found, to the end of the
parameter).  */
static struct c_parm *
c_parser_parameter_declaration (c_parser *parser, tree attrs)
{
struct c_declspecs *specs;
struct c_declarator *declarator;
tree prefix_attrs;
tree postfix_attrs = NULL_TREE;
bool dummy = false;
if (!c_parser_next_token_starts_declspecs (parser))
{
/* ??? In some Objective-C cases '...' isn't applicable so there
should be a different message. */
c_parser_error (parser,
"expected declaration specifiers or %<...%>");
c_parser_skip_to_end_of_parameter (parser);
return NULL;
}
specs = build_null_declspecs ();
/* Attributes passed down from before the parameter list belong to
the first parameter as prefix attributes.  */
if (attrs)
{
declspecs_add_attrs (specs, attrs);
attrs = NULL_TREE;
}
c_parser_declspecs (parser, specs, true, true, true);
finish_declspecs (specs);
pending_xref_error ();
/* Detach the prefix attributes while the declarator is parsed; they
are recombined with any postfix attributes in the build_c_parm
call below.  */
prefix_attrs = specs->attrs;
specs->attrs = NULL_TREE;
declarator = c_parser_declarator (parser, specs->type_seen_p,
C_DTR_PARM, &dummy);
if (declarator == NULL)
{
c_parser_skip_until_found (parser, CPP_COMMA, NULL);
return NULL;
}
if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
postfix_attrs = c_parser_attributes (parser);
return build_c_parm (specs, chainon (postfix_attrs, prefix_attrs),
declarator);
}
/* Parse a string literal in an asm expression.  It should not be
   translated, and wide string literals are an error although
   permitted by the syntax.  This is a GNU extension.
   asm-string-literal:
     string-literal
   ??? At present, following the old parser, the caller needs to have
   set c_lex_string_translate to 0.  It would be better to follow the
   C++ parser rather than using the c_lex_string_translate kludge.
   Returns the string, or NULL_TREE if none was present.  */
static tree
c_parser_asm_string_literal (c_parser *parser)
{
  tree result;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_STRING:
      /* Ordinary narrow string: take it as-is.  */
      result = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      break;
    case CPP_WSTRING:
      /* Wide strings are syntactically allowed but rejected; consume
	 the token and substitute an empty string so parsing can go
	 on.  */
      error ("wide string literal in %<asm%>");
      result = build_string (1, "");
      c_parser_consume_token (parser);
      break;
    default:
      c_parser_error (parser, "expected string literal");
      result = NULL_TREE;
      break;
    }
  return result;
}
/* Parse a simple asm expression. This is used in restricted
contexts, where a full expression with inputs and outputs does not
make sense. This is a GNU extension.
simple-asm-expr:
asm ( asm-string-literal )
Returns the string literal, or NULL_TREE after a parse error.  */
static tree
c_parser_simple_asm_expr (c_parser *parser)
{
tree str;
gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
/* ??? Follow the C++ parser rather than using the
c_lex_string_translate kludge. */
/* Translation is disabled while lexing the asm string; note that it
must be restored to 1 on every exit path below.  */
c_lex_string_translate = 0;
c_parser_consume_token (parser);
if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
{
c_lex_string_translate = 1;
return NULL_TREE;
}
str = c_parser_asm_string_literal (parser);
c_lex_string_translate = 1;
if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
return NULL_TREE;
}
return str;
}
/* Parse (possibly empty) attributes. This is a GNU extension.
attributes:
empty
attributes attribute
attribute:
__attribute__ ( ( attribute-list ) )
attribute-list:
attrib
attribute_list , attrib
attrib:
empty
any-word
any-word ( identifier )
any-word ( identifier , nonempty-expr-list )
any-word ( expr-list )
where the "identifier" must not be declared as a type, and
"any-word" may be any identifier (including one declared as a
type), a reserved word storage class specifier, type specifier or
type qualifier. ??? This still leaves out most reserved keywords
(following the old parser), shouldn't we include them, and why not
allow identifiers declared as types to start the arguments?
Returns the chain of attributes parsed so far; on error the chain
built up to that point is returned after skipping to a close
parenthesis.  */
static tree
c_parser_attributes (c_parser *parser)
{
tree attrs = NULL_TREE;
while (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
{
/* ??? Follow the C++ parser rather than using the
c_lex_string_translate kludge. */
/* Translation stays off for the whole attribute; every return in
this loop must restore it to 1 first.  */
c_lex_string_translate = 0;
c_parser_consume_token (parser);
/* __attribute__ is followed by two open parentheses; require each
in turn.  */
if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
{
c_lex_string_translate = 1;
return attrs;
}
if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
{
c_lex_string_translate = 1;
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
return attrs;
}
/* Parse the attribute list. */
while (c_parser_next_token_is (parser, CPP_COMMA)
|| c_parser_next_token_is (parser, CPP_NAME)
|| c_parser_next_token_is (parser, CPP_KEYWORD))
{
tree attr, attr_name, attr_args;
/* An empty attrib: a lone comma is skipped.  */
if (c_parser_next_token_is (parser, CPP_COMMA))
{
c_parser_consume_token (parser);
continue;
}
if (c_parser_next_token_is (parser, CPP_KEYWORD))
{
/* ??? See comment above about what keywords are
accepted here. */
bool ok;
switch (c_parser_peek_token (parser)->keyword)
{
case RID_STATIC:
case RID_UNSIGNED:
case RID_LONG:
case RID_CONST:
case RID_EXTERN:
/* APPLE LOCAL private extern 5487726 */
case RID_PRIVATE_EXTERN:
case RID_REGISTER:
case RID_TYPEDEF:
case RID_SHORT:
case RID_INLINE:
case RID_VOLATILE:
case RID_SIGNED:
case RID_AUTO:
case RID_RESTRICT:
case RID_COMPLEX:
case RID_THREAD:
case RID_INT:
case RID_CHAR:
case RID_FLOAT:
case RID_DOUBLE:
case RID_VOID:
case RID_DFLOAT32:
case RID_DFLOAT64:
case RID_DFLOAT128:
case RID_BOOL:
ok = true;
break;
default:
ok = false;
break;
}
/* A keyword not in the list above ends the attribute list;
fall out to the close-paren handling below.  */
if (!ok)
break;
}
attr_name = c_parser_peek_token (parser)->value;
c_parser_consume_token (parser);
/* No parenthesized arguments: a bare attribute name.  */
if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN))
{
attr = build_tree_list (attr_name, NULL_TREE);
attrs = chainon (attrs, attr);
continue;
}
c_parser_consume_token (parser);
/* Parse the attribute contents. If they start with an
identifier which is followed by a comma or close
parenthesis, then the arguments start with that
identifier; otherwise they are an expression list. */
if (c_parser_next_token_is (parser, CPP_NAME)
&& c_parser_peek_token (parser)->id_kind == C_ID_ID
&& ((c_parser_peek_2nd_token (parser)->type == CPP_COMMA)
|| (c_parser_peek_2nd_token (parser)->type
== CPP_CLOSE_PAREN)))
{
tree arg1 = c_parser_peek_token (parser)->value;
c_parser_consume_token (parser);
if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
attr_args = build_tree_list (NULL_TREE, arg1)
;
else
{
c_parser_consume_token (parser);
attr_args = tree_cons (NULL_TREE, arg1,
c_parser_expr_list (parser, false));
}
}
else
{
if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
attr_args = NULL_TREE;
else
attr_args = c_parser_expr_list (parser, false);
}
attr = build_tree_list (attr_name, attr_args);
if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
c_parser_consume_token (parser);
else
{
c_lex_string_translate = 1;
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
return attrs;
}
attrs = chainon (attrs, attr);
}
/* Look for the two closing parentheses matching the two opened
above.  */
if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
c_parser_consume_token (parser);
else
{
c_lex_string_translate = 1;
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
return attrs;
}
if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
c_parser_consume_token (parser);
else
{
c_lex_string_translate = 1;
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
return attrs;
}
c_lex_string_translate = 1;
}
return attrs;
}
/* Parse a type name (C90 6.5.5, C99 6.7.6).
   type-name:
     specifier-qualifier-list abstract-declarator[opt]
   Returns the parsed type name, or NULL after a parse error.  */
static struct c_type_name *
c_parser_type_name (c_parser *parser)
{
  struct c_declspecs *specs = build_null_declspecs ();
  struct c_declarator *decl;
  struct c_type_name *result;
  bool id_dummy = false;

  /* A type name must begin with at least one specifier or
     qualifier.  */
  c_parser_declspecs (parser, specs, false, true, true);
  if (!specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected specifier-qualifier-list");
      return NULL;
    }
  pending_xref_error ();
  finish_declspecs (specs);

  /* The declarator, if any, is abstract: no identifier allowed.  */
  decl = c_parser_declarator (parser, specs->type_seen_p,
			      C_DTR_ABSTRACT, &id_dummy);
  if (decl == NULL)
    return NULL;

  result = XOBNEW (&parser_obstack, struct c_type_name);
  result->specs = specs;
  result->declarator = decl;
  return result;
}
/* Parse an initializer (C90 6.5.7, C99 6.7.8).
   initializer:
     assignment-expression
     { initializer-list }
     { initializer-list , }
   initializer-list:
     designation[opt] initializer
     initializer-list , designation[opt] initializer
   designation:
     designator-list =
   designator-list:
     designator
     designator-list designator
   designator:
     array-designator
     . identifier
   array-designator:
     [ constant-expression ]
   GNU extensions:
   initializer:
     { }
   designation:
     array-designator
     identifier :
   array-designator:
     [ constant-expression ... constant-expression ]
   Any expression without commas is accepted in the syntax for the
   constant-expressions, with non-constant expressions rejected later.
   This function is only used for top-level initializers; for nested
   ones, see c_parser_initval.  */
static struct c_expr
c_parser_initializer (c_parser *parser)
{
  struct c_expr result;
  /* Anything that is not a braced list is a plain assignment
     expression.  */
  if (c_parser_next_token_is_not (parser, CPP_OPEN_BRACE))
    {
      result = c_parser_expr_no_commas (parser, NULL);
      /* String literals and compound literals are left unconverted so
	 they can initialize arrays directly.  */
      if (TREE_CODE (result.value) != STRING_CST
	  && TREE_CODE (result.value) != COMPOUND_LITERAL_EXPR)
	result = default_function_array_conversion (result);
      return result;
    }
  return c_parser_braced_init (parser, NULL_TREE, false);
}
/* Parse a braced initializer list. TYPE is the type specified for a
compound literal, and NULL_TREE for other initializers and for
nested braced lists. NESTED_P is true for nested braced lists,
false for the list of a compound literal or the list that is the
top-level initializer in a declaration.  The opening brace must be
the next token.  Returns the built initializer from pop_init_level,
or an error expression if the closing brace was missing.  */
static struct c_expr
c_parser_braced_init (c_parser *parser, tree type, bool nested_p)
{
gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE));
c_parser_consume_token (parser);
/* Open an initializer level in c-typeck's incremental-init machinery;
the success path below closes it with pop_init_level.  */
if (nested_p)
push_init_level (0);
else
really_start_incremental_init (type);
if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
{
/* "{}" — a GNU extension, diagnosed under -pedantic.  */
if (pedantic)
pedwarn ("ISO C forbids empty initializer braces");
}
else
{
/* Parse a non-empty initializer list, possibly with a trailing
comma. */
while (true)
{
c_parser_initelt (parser);
/* c_parser_initelt reports its own errors; stop on the flag.  */
if (parser->error)
break;
if (c_parser_next_token_is (parser, CPP_COMMA))
c_parser_consume_token (parser);
else
break;
/* A comma immediately before "}" is the permitted trailing
comma.  */
if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
break;
}
}
if (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE))
{
struct c_expr ret;
ret.value = error_mark_node;
ret.original_code = ERROR_MARK;
c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, "expected %<}%>");
return ret;
}
c_parser_consume_token (parser);
return pop_init_level (0);
}
/* Parse a nested initializer, including designators.  Each element
(with any designators applied) is handed to process_init_element,
either directly on the error paths here or via c_parser_initval.  */
static void
c_parser_initelt (c_parser *parser)
{
/* Parse any designator or designator list. A single array
designator may have the subsequent "=" omitted in GNU C, but a
longer list or a structure member designator may not. */
if (c_parser_next_token_is (parser, CPP_NAME)
&& c_parser_peek_2nd_token (parser)->type == CPP_COLON)
{
/* Old-style structure member designator. */
set_init_label (c_parser_peek_token (parser)->value);
if (pedantic)
pedwarn ("obsolete use of designated initializer with %<:%>");
/* Consume both the name and the colon.  */
c_parser_consume_token (parser);
c_parser_consume_token (parser);
}
else
{
/* des_seen is 0 if there have been no designators, 1 if there
has been a single array designator and 2 otherwise. */
int des_seen = 0;
while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)
|| c_parser_next_token_is (parser, CPP_DOT))
{
int des_prev = des_seen;
if (des_seen < 2)
des_seen++;
if (c_parser_next_token_is (parser, CPP_DOT))
{
des_seen = 2;
c_parser_consume_token (parser);
if (c_parser_next_token_is (parser, CPP_NAME))
{
set_init_label (c_parser_peek_token (parser)->value);
c_parser_consume_token (parser);
}
else
{
/* ".<non-identifier>": report, record an error element so
the init machinery stays consistent, and bail out.  */
struct c_expr init;
init.value = error_mark_node;
init.original_code = ERROR_MARK;
c_parser_error (parser, "expected identifier");
c_parser_skip_until_found (parser, CPP_COMMA, NULL);
process_init_element (init);
return;
}
}
else
{
tree first, second;
/* ??? Following the old parser, [ objc-receiver
objc-message-args ] is accepted as an initializer,
being distinguished from a designator by what follows
the first assignment expression inside the square
brackets, but after a first array designator a
subsequent square bracket is for Objective-C taken to
start an expression, using the obsolete form of
designated initializer without '=', rather than
possibly being a second level of designation: in LALR
terms, the '[' is shifted rather than reducing
designator to designator-list. */
if (des_prev == 1 && c_dialect_objc ())
{
des_seen = des_prev;
break;
}
if (des_prev == 0 && c_dialect_objc ())
{
/* This might be an array designator or an
Objective-C message expression. If the former,
continue parsing here; if the latter, parse the
remainder of the initializer given the starting
primary-expression. ??? It might make sense to
distinguish when des_prev == 1 as well; see
previous comment. */
tree rec, args;
struct c_expr mexpr;
c_parser_consume_token (parser);
if (c_parser_peek_token (parser)->type == CPP_NAME
&& ((c_parser_peek_token (parser)->id_kind
== C_ID_TYPENAME)
|| (c_parser_peek_token (parser)->id_kind
== C_ID_CLASSNAME)))
{
/* Type name receiver. */
tree id = c_parser_peek_token (parser)->value;
c_parser_consume_token (parser);
rec = objc_get_class_reference (id);
goto parse_message_args;
}
first = c_parser_expr_no_commas (parser, NULL).value;
/* "..." or "]" after the expression means this was an
array designator after all.  */
if (c_parser_next_token_is (parser, CPP_ELLIPSIS)
|| c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
goto array_desig_after_first;
/* Expression receiver. So far only one part
without commas has been parsed; there might be
more of the expression. */
rec = first;
while (c_parser_next_token_is (parser, CPP_COMMA))
{
struct c_expr next;
c_parser_consume_token (parser);
next = c_parser_expr_no_commas (parser, NULL);
next = default_function_array_conversion (next);
rec = build_compound_expr (rec, next.value);
}
parse_message_args:
/* Now parse the objc-message-args. */
args = c_parser_objc_message_args (parser);
c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
"expected %<]%>");
mexpr.value
= objc_build_message_expr (build_tree_list (rec, args));
mexpr.original_code = ERROR_MARK;
/* Now parse and process the remainder of the
initializer, starting with this message
expression as a primary-expression. */
c_parser_initval (parser, &mexpr);
return;
}
/* Plain array designator "[expr]" or GNU range
"[expr ... expr]".  */
c_parser_consume_token (parser);
first = c_parser_expr_no_commas (parser, NULL).value;
array_desig_after_first:
if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
{
c_parser_consume_token (parser);
second = c_parser_expr_no_commas (parser, NULL).value;
}
else
second = NULL_TREE;
if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
{
c_parser_consume_token (parser);
set_init_index (first, second);
if (pedantic && second)
pedwarn ("ISO C forbids specifying range of "
"elements to initialize");
}
else
c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
"expected %<]%>");
}
}
if (des_seen >= 1)
{
if (c_parser_next_token_is (parser, CPP_EQ))
{
if (pedantic && !flag_isoc99)
pedwarn ("ISO C90 forbids specifying subobject to initialize");
c_parser_consume_token (parser);
}
else
{
/* "=" omitted: allowed (as an obsolete GNU form) only after
a single array designator.  */
if (des_seen == 1)
{
if (pedantic)
pedwarn ("obsolete use of designated initializer "
"without %<=%>");
}
else
{
struct c_expr init;
init.value = error_mark_node;
init.original_code = ERROR_MARK;
c_parser_error (parser, "expected %<=%>");
c_parser_skip_until_found (parser, CPP_COMMA, NULL);
process_init_element (init);
return;
}
}
}
}
c_parser_initval (parser, NULL);
}
/* Parse a nested initializer; as c_parser_initializer but parses
   initializers within braced lists, after any designators have been
   applied.  If AFTER is not NULL then it is an Objective-C message
   expression which is the primary-expression starting the
   initializer.  The parsed element is passed to
   process_init_element.  */
static void
c_parser_initval (c_parser *parser, struct c_expr *after)
{
  struct c_expr value;
  /* An already-started expression (AFTER) only arises in
     Objective-C.  */
  gcc_assert (!after || c_dialect_objc ());
  if (after == NULL && c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    value = c_parser_braced_init (parser, NULL_TREE, true);
  else
    {
      value = c_parser_expr_no_commas (parser, after);
      /* Leave string literals and compound literals unconverted so
	 they can initialize arrays.  */
      if (value.value != NULL_TREE
	  && TREE_CODE (value.value) != STRING_CST
	  && TREE_CODE (value.value) != COMPOUND_LITERAL_EXPR)
	value = default_function_array_conversion (value);
    }
  process_init_element (value);
}
/* Parse a compound statement (possibly a function body) (C90 6.6.2,
   C99 6.8.2).
   compound-statement:
     { block-item-list[opt] }
     { label-declarations block-item-list }
   block-item-list:
     block-item
     block-item-list block-item
   block-item:
     nested-declaration
     statement
   nested-declaration:
     declaration
   GNU extensions:
   compound-statement:
     { label-declarations block-item-list }
   nested-declaration:
     __extension__ nested-declaration
     nested-function-definition
   label-declarations:
     label-declaration
     label-declarations label-declaration
   label-declaration:
     __label__ identifier-list ;
   Allowing the mixing of declarations and code is new in C99.  The
   GNU syntax also permits (not shown above) labels at the end of
   compound statements, which yield an error.  We don't allow labels
   on declarations; this might seem like a natural extension, but
   there would be a conflict between attributes on the label and
   prefix attributes on the declaration.  ??? The syntax follows the
   old parser in requiring something after label declarations.
   Although they are erroneous if the labels declared aren't defined,
   is it useful for the syntax to be this way?
   OpenMP:
   block-item:
     openmp-directive
   openmp-directive:
     barrier-directive
     flush-directive
   Returns the statement tree for the compound statement, or
   error_mark_node if the opening brace is missing.  */
static tree
c_parser_compound_statement (c_parser *parser)
{
  tree block;
  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    return error_mark_node;
  /* Everything between the braces, including the closing one, is
     handled by c_parser_compound_statement_nostart.  */
  block = c_begin_compound_stmt (true);
  c_parser_compound_statement_nostart (parser);
  return c_end_compound_stmt (block, true);
}
/* Parse a compound statement except for the opening brace. This is
used for parsing both compound statements and statement expressions
(which follow different paths to handling the opening).  Consumes
everything up to and including the closing brace.  */
static void
c_parser_compound_statement_nostart (c_parser *parser)
{
/* last_stmt: the previous block item was a statement (used for the
C90 mixed-declarations diagnostic).  last_label: the previous item
was a label (a label may not directly precede a declaration or end
the block).  */
bool last_stmt = false;
bool last_label = false;
if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
{
/* Empty block "{}".  */
c_parser_consume_token (parser);
/* APPLE LOCAL begin CW asm blocks (in 4.2 am) */
if (flag_iasm_blocks)
iasm_end_block ();
/* APPLE LOCAL end CW asm blocks */
return;
}
if (c_parser_next_token_is_keyword (parser, RID_LABEL))
{
/* Read zero or more forward-declarations for labels that nested
functions can jump to. */
while (c_parser_next_token_is_keyword (parser, RID_LABEL))
{
c_parser_consume_token (parser);
/* Any identifiers, including those declared as type names,
are OK here. */
while (true)
{
tree label;
if (c_parser_next_token_is_not (parser, CPP_NAME))
{
c_parser_error (parser, "expected identifier");
break;
}
label
= declare_label (c_parser_peek_token (parser)->value);
C_DECLARED_LABEL_FLAG (label) = 1;
add_stmt (build_stmt (DECL_EXPR, label));
c_parser_consume_token (parser);
if (c_parser_next_token_is (parser, CPP_COMMA))
c_parser_consume_token (parser);
else
break;
}
c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* ??? Locating this diagnostic on the token after the
declarations end follows the old parser, but it might be
better to locate it where the declarations start instead. */
if (pedantic)
pedwarn ("ISO C forbids label declarations");
}
/* We must now have at least one statement, label or declaration. */
if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
{
c_parser_error (parser, "expected declaration or statement")
;
c_parser_consume_token (parser);
/* APPLE LOCAL begin CW asm blocks (in 4.2 am) */
if (flag_iasm_blocks)
iasm_end_block ();
/* APPLE LOCAL end CW asm blocks */
return;
}
while (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE))
{
location_t loc = c_parser_peek_token (parser)->location;
if (c_parser_next_token_is_keyword (parser, RID_CASE)
|| c_parser_next_token_is_keyword (parser, RID_DEFAULT)
|| (c_parser_next_token_is (parser, CPP_NAME)
/* APPLE LOCAL CW asm blocks */
&& iasm_state < iasm_decls
&& c_parser_peek_2nd_token (parser)->type == CPP_COLON))
{
last_label = true;
last_stmt = false;
c_parser_label (parser);
}
else if (!last_label
&& c_parser_next_token_starts_declspecs (parser))
{
last_label = false;
/* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
c_parser_declaration_or_fndef (parser, true, true, true, true, NULL);
/* Declarations after statements are a C99 feature; warn under
C90 pedantic or -Wdeclaration-after-statement.  */
if (last_stmt
&& ((pedantic && !flag_isoc99)
|| warn_declaration_after_statement))
pedwarn_c90 ("%HISO C90 forbids mixed declarations and code",
&loc);
last_stmt = false;
}
else if (!last_label
&& c_parser_next_token_is_keyword (parser, RID_EXTENSION))
{
/* __extension__ can start a declaration, but is also an
unary operator that can start an expression. Consume all
but the last of a possible series of __extension__ to
determine which. */
while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
&& (c_parser_peek_2nd_token (parser)->keyword
== RID_EXTENSION))
c_parser_consume_token (parser);
if (c_token_starts_declspecs (c_parser_peek_2nd_token (parser)))
{
int ext;
ext = disable_extension_diagnostics ();
c_parser_consume_token (parser);
last_label = false;
/* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
c_parser_declaration_or_fndef (parser, true, true, true, true, NULL);
/* Following the old parser, __extension__ does not
disable this diagnostic. */
restore_extension_diagnostics (ext);
if (last_stmt
&& ((pedantic && !flag_isoc99)
|| warn_declaration_after_statement))
pedwarn_c90 ("%HISO C90 forbids mixed declarations and code",
&loc);
last_stmt = false;
}
else
goto statement;
}
else if (c_parser_next_token_is (parser, CPP_PRAGMA))
{
/* External pragmas, and some omp pragmas, are not associated
with regular c code, and so are not to be considered statements
syntactically. This ensures that the user doesn't put them
places that would turn into syntax errors if the directive
were ignored. */
if (c_parser_pragma (parser, pragma_compound))
last_label = false, last_stmt = true;
}
else if (c_parser_next_token_is (parser, CPP_EOF))
{
/* Unterminated block at end of input.  */
c_parser_error (parser, "expected declaration or statement");
/* APPLE LOCAL begin CW asm blocks (in 4.2 am) */
if (flag_iasm_blocks)
iasm_end_block ();
/* APPLE LOCAL end CW asm blocks */
return;
}
else
{
statement:
last_label = false;
last_stmt = true;
c_parser_statement_after_labels (parser);
}
/* APPLE LOCAL begin CW asm blocks (in 4.2 al) */
/* MAYBE NOT NEEDED HERE. */
if (flag_iasm_blocks) iasm_in_decl = false;
/* APPLE LOCAL end CW asm blocks (in 4.2 al) */
/* Clear the error flag so one bad block item does not suppress
diagnostics for the next.  */
parser->error = false;
}
/* APPLE LOCAL begin CW asm blocks (in 4.2 am) */
if (flag_iasm_blocks)
iasm_end_block ();
/* APPLE LOCAL end CW asm blocks */
if (last_label)
error ("label at end of compound statement");
c_parser_consume_token (parser);
}
/* Parse a label (C90 6.6.1, C99 6.8.1).
label:
identifier : attributes[opt]
case constant-expression :
default :
GNU extensions:
label:
case constant-expression ... constant-expression :
The use of attributes on labels is a GNU extension. The syntax in
GNU C accepts any expressions without commas, non-constant
expressions being rejected later.  The label statement, if one was
built, is given the location of the first token of the label.  */
static void
c_parser_label (c_parser *parser)
{
location_t loc1 = c_parser_peek_token (parser)->location;
tree label = NULL_TREE;
if (c_parser_next_token_is_keyword (parser, RID_CASE))
{
tree exp1, exp2;
c_parser_consume_token (parser);
exp1 = c_parser_expr_no_commas (parser, NULL).value;
if (c_parser_next_token_is (parser, CPP_COLON))
{
c_parser_consume_token (parser);
label = do_case (exp1, NULL_TREE);
}
else if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
{
/* GNU case range: "case LOW ... HIGH:".  */
c_parser_consume_token (parser);
exp2 = c_parser_expr_no_commas (parser, NULL).value;
if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
label = do_case (exp1, exp2);
}
else
c_parser_error (parser, "expected %<:%> or %<...%>");
}
else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
{
c_parser_consume_token (parser);
if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
label = do_case (NULL_TREE, NULL_TREE);
}
else
{
/* Ordinary "identifier :" label; the caller has already checked
that a name followed by a colon is next, hence the asserts.  */
tree name = c_parser_peek_token (parser)->value;
tree tlab;
location_t loc2;
tree attrs;
gcc_assert (c_parser_next_token_is (parser, CPP_NAME));
c_parser_consume_token (parser);
gcc_assert (c_parser_next_token_is (parser, CPP_COLON));
loc2 = c_parser_peek_token (parser)->location;
c_parser_consume_token (parser);
attrs = c_parser_attributes (parser);
tlab = define_label (loc2, name);
/* define_label returns NULL_TREE on a duplicate label.  */
if (tlab)
{
decl_attributes (&tlab, attrs, 0);
label = add_stmt (build_stmt (LABEL_EXPR, tlab));
}
}
/* do_case/define_label may have failed, leaving LABEL null.  */
if (label)
SET_EXPR_LOCATION (label, loc1);
}
/* Parse a statement (C90 6.6, C99 6.8).
statement:
labeled-statement
compound-statement
expression-statement
selection-statement
iteration-statement
jump-statement
labeled-statement:
label statement
expression-statement:
expression[opt] ;
selection-statement:
if-statement
switch-statement
iteration-statement:
while-statement
do-statement
for-statement
jump-statement:
goto identifier ;
continue ;
break ;
return expression[opt] ;
GNU extensions:
statement:
asm-statement
jump-statement:
goto * expression ;
Objective-C:
statement:
objc-throw-statement
objc-try-catch-statement
objc-synchronized-statement
objc-throw-statement:
@throw expression ;
@throw ;
OpenMP:
statement:
openmp-construct
openmp-construct:
parallel-construct
for-construct
sections-construct
single-construct
parallel-for-construct
parallel-sections-construct
master-construct
critical-construct
atomic-construct
ordered-construct
parallel-construct:
parallel-directive structured-block
for-construct:
for-directive iteration-statement
sections-construct:
sections-directive section-scope
single-construct:
single-directive structured-block
parallel-for-construct:
parallel-for-directive iteration-statement
parallel-sections-construct:
parallel-sections-directive section-scope
master-construct:
master-directive structured-block
critical-construct:
critical-directive structured-block
atomic-construct:
atomic-directive expression-statement
ordered-construct:
ordered-directive structured-block */
static void
c_parser_statement (c_parser *parser)
{
  /* First absorb every label attached to this statement: "case"
     labels, "default" labels, and ordinary identifier labels
     (recognized by an identifier followed by a colon).  */
  for (;;)
    {
      bool have_label
	= (c_parser_next_token_is_keyword (parser, RID_CASE)
	   || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	   || (c_parser_next_token_is (parser, CPP_NAME)
	       && c_parser_peek_2nd_token (parser)->type == CPP_COLON));
      if (!have_label)
	break;
      c_parser_label (parser);
    }
  /* Then parse the statement proper.  */
  c_parser_statement_after_labels (parser);
}
/* Parse a statement, other than a labeled statement.  Dispatches on
   the first token of the statement: compound statements, statement
   keywords, Objective-C statements, OpenMP pragmas and plain
   expression statements each go to their own parser routine.  Sets
   the source location on the resulting statement tree where one was
   built.  */
static void
c_parser_statement_after_labels (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree stmt = NULL_TREE;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_OPEN_BRACE:
      add_stmt (c_parser_compound_statement (parser));
      break;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_IF:
	  c_parser_if_statement (parser);
	  break;
	case RID_SWITCH:
	  c_parser_switch_statement (parser);
	  break;
	case RID_WHILE:
	  c_parser_while_statement (parser);
	  break;
	case RID_DO:
	  c_parser_do_statement (parser);
	  break;
	case RID_FOR:
	  c_parser_for_statement (parser);
	  break;
	case RID_GOTO:
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      /* goto identifier;  */
	      stmt = c_finish_goto_label (c_parser_peek_token (parser)->value);
	      c_parser_consume_token (parser);
	    }
	  else if (c_parser_next_token_is (parser, CPP_MULT))
	    {
	      /* GNU extension: goto *expr; (computed goto).  */
	      c_parser_consume_token (parser);
	      stmt = c_finish_goto_ptr (c_parser_expression (parser).value);
	    }
	  else
	    c_parser_error (parser, "expected identifier or %<*%>");
	  goto expect_semicolon;
	case RID_CONTINUE:
	  c_parser_consume_token (parser);
	  stmt = c_finish_bc_stmt (&c_cont_label, false);
	  goto expect_semicolon;
	case RID_BREAK:
	  c_parser_consume_token (parser);
	  stmt = c_finish_bc_stmt (&c_break_label, true);
	  goto expect_semicolon;
	case RID_RETURN:
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      /* "return;" with no value.  */
	      stmt = c_finish_return (NULL_TREE);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      stmt = c_finish_return (c_parser_expression_conv (parser).value);
	      goto expect_semicolon;
	    }
	  break;
	case RID_ASM:
	  stmt = c_parser_asm_statement (parser);
	  break;
	case RID_AT_THROW:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      /* "@throw;" rethrows the current exception.  */
	      stmt = objc_build_throw_stmt (NULL_TREE);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      stmt
		= objc_build_throw_stmt (c_parser_expression (parser).value);
	      goto expect_semicolon;
	    }
	  break;
	case RID_AT_TRY:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_try_catch_statement (parser);
	  break;
	case RID_AT_SYNCHRONIZED:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_synchronized_statement (parser);
	  break;
	default:
	  /* Any other keyword here must begin an expression.  */
	  goto expr_stmt;
	}
      break;
    case CPP_SEMICOLON:
      /* Null statement.  */
      c_parser_consume_token (parser);
      break;
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_SQUARE:
      /* Avoid infinite loop in error recovery:
	 c_parser_skip_until_found stops at a closing nesting
	 delimiter without consuming it, but here we need to consume
	 it to proceed further.  */
      c_parser_error (parser, "expected statement");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      c_parser_pragma (parser, pragma_stmt);
      break;
    default:
    expr_stmt:
      /* APPLE LOCAL begin CW asm blocks */
      /* (in 4.2 al) */
      /* Inside a CW-style asm block, any would-be expression
	 statement is actually a line of assembly.  */
      if (iasm_state >= iasm_decls)
	{
	  iasm_state = iasm_asm;
	  inside_iasm_block = true;
	  iasm_kill_regs = true;
	  iasm_in_decl = false;
	  c_parser_iasm_line_seq_opt (parser);
	  stmt = NULL_TREE;
	  break;
	}
      /* APPLE LOCAL end CW asm blocks */
      stmt = c_finish_expr_stmt (c_parser_expression_conv (parser).value);
    expect_semicolon:
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      break;
    }
  /* Two cases cannot and do not have line numbers associated: If stmt
     is degenerate, such as "2;", then stmt is an INTEGER_CST, which
     cannot hold line numbers.  But that's OK because the statement
     will either be changed to a MODIFY_EXPR during gimplification of
     the statement expr, or discarded.  If stmt was compound, but
     without new variables, we will have skipped the creation of a
     BIND and will have a bare STATEMENT_LIST.  But that's OK because
     (recursively) all of the component statements should already have
     line numbers assigned.  ??? Can we discard no-op statements
     earlier?  */
  if (stmt && EXPR_P (stmt))
    SET_EXPR_LOCATION (stmt, loc);
}
/* Parse the parenthesized controlling expression of an if, do or
   while statement and convert it to a truth value.
   condition:
     ( expression )
   Returns error_mark_node when the opening parenthesis is missing.  */
static tree
c_parser_paren_condition (c_parser *parser)
{
  location_t expr_loc;
  tree result;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return error_mark_node;
  expr_loc = c_parser_peek_token (parser)->location;
  result = c_objc_common_truthvalue_conversion
    (c_parser_expression_conv (parser).value);
  if (EXPR_P (result))
    SET_EXPR_LOCATION (result, expr_loc);
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return result;
}
/* Parse a single statement, wrapping it in a scope of its own when
   compiling as C99 (where a statement forms a block).  */
static tree
c_parser_c99_block_statement (c_parser *parser)
{
  tree scope = c_begin_compound_stmt (flag_isoc99);
  c_parser_statement (parser);
  return c_end_compound_stmt (scope, flag_isoc99);
}
/* Parse the body of an if statement or of its else clause.  This is
   an ordinary statement except that (a) in C99 it forms a block of
   its own, (b) *IF_P reports whether the body is itself an if
   statement (used by -Wparentheses), and (c) an empty body is
   materialized explicitly so that -Wextra can warn about it.  */
static tree
c_parser_if_body (c_parser *parser, bool *if_p)
{
  tree scope = c_begin_compound_stmt (flag_isoc99);
  /* Absorb any labels preceding the body.  */
  for (;;)
    {
      if (c_parser_next_token_is_keyword (parser, RID_CASE)
	  || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	  || (c_parser_next_token_is (parser, CPP_NAME)
	      && c_parser_peek_2nd_token (parser)->type == CPP_COLON))
	c_parser_label (parser);
      else
	break;
    }
  *if_p = c_parser_next_token_is_keyword (parser, RID_IF);
  /* APPLE LOCAL mainline */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    add_stmt (build_empty_stmt ());
  c_parser_statement_after_labels (parser);
  return c_end_compound_stmt (scope, flag_isoc99);
}
/* Parse an if statement (C90 6.6.4, C99 6.8.4).
   if-statement:
     if ( expression ) statement
     if ( expression ) statement else statement
*/
static void
c_parser_if_statement (c_parser *parser)
{
  tree outer_block, cond_expr, then_body, else_body;
  location_t if_loc;
  bool then_is_if = false;
  bool else_is_if = false;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_IF));
  c_parser_consume_token (parser);
  /* The whole if statement lives in its own C99 scope.  */
  outer_block = c_begin_compound_stmt (flag_isoc99);
  if_loc = c_parser_peek_token (parser)->location;
  cond_expr = c_parser_paren_condition (parser);
  then_body = c_parser_if_body (parser, &then_is_if);
  else_body = NULL_TREE;
  if (c_parser_next_token_is_keyword (parser, RID_ELSE))
    {
      c_parser_consume_token (parser);
      else_body = c_parser_if_body (parser, &else_is_if);
    }
  /* then_is_if feeds the -Wparentheses ambiguous-else warning.  */
  c_finish_if_stmt (if_loc, cond_expr, then_body, else_body, then_is_if);
  add_stmt (c_end_compound_stmt (outer_block, flag_isoc99));
}
/* Parse a switch statement (C90 6.6.4, C99 6.8.4).
   switch-statement:
     switch (expression) statement
*/
static void
c_parser_switch_statement (c_parser *parser)
{
  tree outer_block, switch_expr, switch_body, saved_break_label;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_SWITCH));
  c_parser_consume_token (parser);
  outer_block = c_begin_compound_stmt (flag_isoc99);
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    switch_expr = error_mark_node;
  else
    {
      switch_expr = c_parser_expression (parser).value;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  c_start_case (switch_expr);
  /* "break" inside the body must bind to this switch, so stash the
     enclosing break label and start afresh.  */
  saved_break_label = c_break_label;
  c_break_label = NULL_TREE;
  switch_body = c_parser_c99_block_statement (parser);
  c_finish_case (switch_body);
  if (c_break_label)
    add_stmt (build1 (LABEL_EXPR, void_type_node, c_break_label));
  c_break_label = saved_break_label;
  add_stmt (c_end_compound_stmt (outer_block, flag_isoc99));
}
/* Parse a while statement (C90 6.6.5, C99 6.8.5).
   while-statement:
   APPLE LOCAL begin for-fsf-4_4 3274130 5295549
     while attributes (expression) statement
   The use of attributes is a GNU extension.
   APPLE LOCAL end for-fsf-4_4 3274130 5295549
*/
static void
c_parser_while_statement (c_parser *parser)
{
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  tree block, cond, body, save_break, save_cont, attrs;
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_WHILE));
  c_parser_consume_token (parser);
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  attrs = c_parser_attributes (parser);
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  cond = c_parser_paren_condition (parser);
  /* Save and clear the break/continue labels so that break and
     continue inside the body bind to this loop, then restore the
     enclosing labels once the loop has been built.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  c_finish_loop (loc, cond, NULL, body, c_break_label, c_cont_label, attrs,
		 true);
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  add_stmt (c_end_compound_stmt (block, flag_isoc99));
  c_break_label = save_break;
  c_cont_label = save_cont;
}
/* Parse a do statement (C90 6.6.5, C99 6.8.5).
   do-statement:
   APPLE LOCAL begin for-fsf-4_4 3274130 5295549
     do attributes statement while ( expression ) ;
   The use of attributes is a GNU extension.
   APPLE LOCAL end for-fsf-4_4 3274130 5295549
*/
static void
c_parser_do_statement (c_parser *parser)
{
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  tree block, cond, body, save_break, save_cont, new_break, new_cont, attrs;
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_DO));
  c_parser_consume_token (parser);
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  attrs = c_parser_attributes (parser);
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  /* Save and clear the break/continue labels so break/continue in
     the body bind to this loop.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  c_parser_require_keyword (parser, RID_WHILE, "expected %<while%>");
  /* Capture this loop's labels, then restore the enclosing ones
     BEFORE parsing the condition: break/continue cannot occur in the
     condition expression, so it must see the outer labels.  */
  new_break = c_break_label;
  c_break_label = save_break;
  new_cont = c_cont_label;
  c_cont_label = save_cont;
  cond = c_parser_paren_condition (parser);
  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  c_finish_loop (loc, cond, NULL, body, new_break, new_cont, attrs, false);
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  add_stmt (c_end_compound_stmt (block, flag_isoc99));
}
/* Parse a for statement (C90 6.6.5, C99 6.8.5).
   for-statement:
   APPLE LOCAL begin for-fsf-4_4 3274130 5295549
     for attributes ( expression[opt] ; expression[opt] ; expression[opt] ) \
	 statement
     for attributes ( nested-declaration expression[opt] ; expression[opt] ) \
	 statement
   The form with a declaration is new in C99.
   The use of attributes is a GNU extension.
   APPLE LOCAL end for-fsf-4_4 3274130 5295549
   ??? In accordance with the old parser, the declaration may be a
   nested function, which is then rejected in check_for_loop_decls,
   but does it make any sense for this to be included in the grammar?
   Note in particular that the nested function does not include a
   trailing ';', whereas the "declaration" production includes one.
   Also, can we reject bad declarations earlier and cheaper than
   check_for_loop_decls?  */
static void
c_parser_for_statement (c_parser *parser)
{
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  tree block, cond, incr, save_break, save_cont, body, attrs;
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  location_t loc;
  /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
  /* Set when this turns out to be an Objective-C "for (x in y)"
     fast-enumeration loop rather than a classic C for loop.  */
  bool foreach_p = false;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_FOR));
  loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
  attrs = c_parser_attributes (parser);
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  /* APPLE LOCAL radar 4472881 (in 4.2 ah) */
  block = c_begin_compound_stmt (flag_isoc99 || c_dialect_objc ());
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      /* APPLE LOCAL radar 4472881 (in 4.2 u) */
      objc_foreach_context = 1;
      /* Parse the initialization declaration or expression.  */
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  /* Empty initialization clause.  */
	  c_parser_consume_token (parser);
	  c_finish_expr_stmt (NULL_TREE);
	}
      else if (c_parser_next_token_starts_declspecs (parser))
	{
	  /* C99 declaration as the initializer; may also turn out to
	     be the declaration part of a foreach header.  */
	  /* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
	  cond = NULL_TREE;
	  c_parser_declaration_or_fndef (parser, true, true, true, true, &cond);
	  if (c_parser_next_token_is_keyword (parser, RID_IN))
	    {
	      cond = finish_parse_foreach_header (parser, cond);
	      foreach_p = true;
	    }
	  else
	    check_for_loop_decls ();
	  /* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
	}
      else if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
	{
	  /* __extension__ can start a declaration, but is also an
	     unary operator that can start an expression.  Consume all
	     but the last of a possible series of __extension__ to
	     determine which.  */
	  while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
		 && (c_parser_peek_2nd_token (parser)->keyword
		     == RID_EXTENSION))
	    c_parser_consume_token (parser);
	  if (c_token_starts_declspecs (c_parser_peek_2nd_token (parser)))
	    {
	      int ext;
	      ext = disable_extension_diagnostics ();
	      c_parser_consume_token (parser);
	      /* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
	      cond = NULL_TREE;
	      c_parser_declaration_or_fndef (parser, true, true, true, true, &cond);
	      restore_extension_diagnostics (ext);
	      if (c_parser_next_token_is_keyword (parser, RID_IN))
		{
		  cond = finish_parse_foreach_header (parser, cond);
		  foreach_p = true;
		}
	      else
		check_for_loop_decls ();
	      /* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
	    }
	  else
	    goto init_expr;
	}
      else
	{
	init_expr:
	  /* Plain expression initializer, or the "expr in collection"
	     form of a foreach header.  */
	  /* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
	  cond = c_parser_expression (parser).value;
	  if (c_parser_next_token_is_keyword (parser, RID_IN))
	    {
	      c_parser_consume_token (parser); /* IN */
	      cond = build_tree_list (cond, c_parser_initializer (parser).value);
	      foreach_p = true;
	    }
	  else
	    {
	      c_finish_expr_stmt (cond);
	      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	    }
	}
      objc_foreach_context = 0;
      /* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
      /* Parse the loop condition.  */
      loc = c_parser_peek_token (parser)->location;
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  c_parser_consume_token (parser);
	  cond = NULL_TREE;
	}
      /* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
      /* For a foreach loop, COND already holds the parsed header;
	 there is no separate condition to parse.  */
      else if (foreach_p)
	;
      /* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
      else
	{
	  tree ocond = c_parser_expression_conv (parser).value;
	  cond = c_objc_common_truthvalue_conversion (ocond);
	  if (EXPR_P (cond))
	    SET_EXPR_LOCATION (cond, loc);
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	}
      /* Parse the increment expression.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	incr = c_process_expr_stmt (NULL_TREE);
      else
	incr = c_process_expr_stmt (c_parser_expression (parser).value);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  else
    {
      /* No opening parenthesis: error recovery.  */
      cond = error_mark_node;
      incr = error_mark_node;
    }
  /* Save and clear break/continue labels so they bind to this loop.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  /* APPLE LOCAL begin radar 4708210 (for_objc_collection in 4.2) */
  if (foreach_p)
    objc_finish_foreach_loop (loc, cond, body, c_break_label, c_cont_label);
  else
    /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
    c_finish_loop (loc, cond, incr, body, c_break_label, c_cont_label, attrs,
		   true);
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
  /* APPLE LOCAL end radar 4708210 (for_objc_collection in 4.2) */
  /* APPLE LOCAL radar 4472881 (in 4.2 ai) */
  add_stmt (c_end_compound_stmt (block, flag_isoc99 || c_dialect_objc ()));
  c_break_label = save_break;
  c_cont_label = save_cont;
}
/* Parse an asm statement, a GNU extension.  This is a full-blown asm
   statement with inputs, outputs, clobbers, and volatile tag
   allowed.
   asm-statement:
     asm type-qualifier[opt] ( asm-argument ) ;
   asm-argument:
     asm-string-literal
     asm-string-literal : asm-operands[opt]
     asm-string-literal : asm-operands[opt] : asm-operands[opt]
     asm-string-literal : asm-operands[opt] : asm-operands[opt] : asm-clobbers
   Qualifiers other than volatile are accepted in the syntax but
   warned for.
   Returns the built ASM statement, or NULL_TREE on error or when a
   CW-style asm block was handled instead.  NOTE(review): this
   routine toggles the global c_lex_string_translate around the asm
   string parsing; every early return must restore it to 1.  */
static tree
c_parser_asm_statement (c_parser *parser)
{
  tree quals, str, outputs, inputs, clobbers, ret;
  bool simple;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  c_parser_consume_token (parser);
  /* APPLE LOCAL CW asm blocks */
  iasm_state = iasm_decls;
  if (c_parser_next_token_is_keyword (parser, RID_VOLATILE))
    {
      quals = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else if (c_parser_next_token_is_keyword (parser, RID_CONST)
	   || c_parser_next_token_is_keyword (parser, RID_RESTRICT))
    {
      /* Accepted syntactically but meaningless for asm.  */
      warning (0, "%E qualifier ignored on asm",
	       c_parser_peek_token (parser)->value);
      quals = NULL_TREE;
      c_parser_consume_token (parser);
    }
  else
    quals = NULL_TREE;
  /* APPLE LOCAL begin CW asm blocks */
  /* A CW-style asm block is introduced by an open brace.  */
  /* (in 4.2 as) */
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      if (quals)
	warning (0, "%E qualifier ignored on asm", quals);
      c_parser_consume_token (parser);
      if (flag_iasm_blocks)
	c_parser_iasm_compound_statement (parser);
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	  c_parser_error (parser, "asm blocks not enabled, use `-fasm-blocks'");
	  iasm_state = iasm_none;
	}
      return NULL_TREE;
    }
  /* A braceless CW-style asm statement: recognized by tokens that
     cannot start a GNU asm argument.  */
  if (quals == NULL_TREE
      && (c_parser_next_token_is (parser, CPP_DOT)
	  || c_parser_next_token_is (parser, CPP_ATSIGN)
	  || c_parser_next_token_is (parser, CPP_NAME)
	  || c_parser_next_token_is_keyword (parser, RID_ASM)
	  || c_parser_next_token_is (parser, CPP_SEMICOLON)
	  || (c_parser_iasm_bol (parser)
	      && ! c_parser_next_token_is (parser, CPP_OPEN_PAREN))))
    {
      if (flag_iasm_blocks)
	c_parser_iasm_top_statement (parser);
      else
	error ("asm blocks not enabled, use `-fasm-blocks'");
      return NULL_TREE;
    }
  iasm_state = iasm_none;
  /* APPLE LOCAL end CW asm blocks */
  /* ??? Follow the C++ parser rather than using the
     c_lex_string_translate kludge.  */
  /* Disable string translation so the asm template is taken
     verbatim.  */
  c_lex_string_translate = 0;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      c_lex_string_translate = 1;
      return NULL_TREE;
    }
  str = c_parser_asm_string_literal (parser);
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      /* asm ("template") with no operand lists.  */
      simple = true;
      outputs = NULL_TREE;
      inputs = NULL_TREE;
      clobbers = NULL_TREE;
      goto done_asm;
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%> or %<)%>"))
    {
      c_lex_string_translate = 1;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  simple = false;
  /* Parse outputs.  */
  if (c_parser_next_token_is (parser, CPP_COLON)
      || c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    outputs = NULL_TREE;
  else
    outputs = c_parser_asm_operands (parser, false);
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      inputs = NULL_TREE;
      clobbers = NULL_TREE;
      goto done_asm;
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%> or %<)%>"))
    {
      c_lex_string_translate = 1;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  /* Parse inputs.  */
  if (c_parser_next_token_is (parser, CPP_COLON)
      || c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    inputs = NULL_TREE;
  else
    inputs = c_parser_asm_operands (parser, true);
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      clobbers = NULL_TREE;
      goto done_asm;
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%> or %<)%>"))
    {
      c_lex_string_translate = 1;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  /* Parse clobbers.  */
  clobbers = c_parser_asm_clobbers (parser);
 done_asm:
  c_lex_string_translate = 1;
  if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
  ret = build_asm_stmt (quals, build_asm_expr (str, outputs, inputs,
					       clobbers, simple));
  return ret;
}
/* Parse asm operands, a GNU extension.  If CONVERT_P (for inputs but
   not outputs), apply the default conversion of functions and arrays
   to pointers.
   asm-operands:
     asm-operand
     asm-operands , asm-operand
   asm-operand:
     asm-string-literal ( expression )
     [ identifier ] asm-string-literal ( expression )
   Returns a TREE_LIST of operands, or NULL_TREE on a parse error.
   NOTE(review): the caller has set c_lex_string_translate to 0 while
   parsing the asm argument; this routine re-enables translation only
   around the parenthesized C expression.  */
static tree
c_parser_asm_operands (c_parser *parser, bool convert_p)
{
  tree list = NULL_TREE;
  while (true)
    {
      tree name, str;
      struct c_expr expr;
      /* Optional [ identifier ] symbolic operand name.  */
      if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
	{
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      tree id = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	      /* Store the name as a string, not an identifier.  */
	      name = build_string (IDENTIFIER_LENGTH (id),
				   IDENTIFIER_POINTER (id));
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL);
	      return NULL_TREE;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	}
      else
	name = NULL_TREE;
      str = c_parser_asm_string_literal (parser);
      if (str == NULL_TREE)
	return NULL_TREE;
      /* The operand expression is ordinary C: turn string
	 translation back on while parsing it.  */
      c_lex_string_translate = 1;
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  c_lex_string_translate = 0;
	  return NULL_TREE;
	}
      expr = c_parser_expression (parser);
      if (convert_p)
	expr = default_function_array_conversion (expr);
      c_lex_string_translate = 0;
      if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return NULL_TREE;
	}
      list = chainon (list, build_tree_list (build_tree_list (name, str),
					     expr.value));
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  return list;
}
/* Parse asm clobbers, a GNU extension.
   asm-clobbers:
     asm-string-literal
     asm-clobbers , asm-string-literal
   Returns the clobbers as a TREE_LIST (in reverse source order), or
   NULL_TREE if any clobber string fails to parse.  */
static tree
c_parser_asm_clobbers (c_parser *parser)
{
  tree clobber_list = NULL_TREE;
  bool more = true;
  while (more)
    {
      tree clobber = c_parser_asm_string_literal (parser);
      if (clobber == NULL_TREE)
	return NULL_TREE;
      clobber_list = tree_cons (NULL_TREE, clobber, clobber_list);
      more = c_parser_next_token_is (parser, CPP_COMMA);
      if (more)
	c_parser_consume_token (parser);
    }
  return clobber_list;
}
/* Parse an expression other than a compound expression; that is, an
   assignment expression (C90 6.3.16, C99 6.5.16).  If AFTER is not
   NULL then it is an Objective-C message expression which is the
   primary-expression starting the expression as an initializer.
   assignment-expression:
     conditional-expression
     unary-expression assignment-operator assignment-expression
   assignment-operator: one of
     = *= /= %= += -= <<= >>= &= ^= |=
   In GNU C we accept any conditional expression on the LHS and
   diagnose the invalid lvalue rather than producing a syntax
   error.  */
static struct c_expr
c_parser_expr_no_commas (c_parser *parser, struct c_expr *after)
{
  struct c_expr lhs, rhs, ret;
  enum tree_code code;
  gcc_assert (!after || c_dialect_objc ());
  lhs = c_parser_conditional_expression (parser, after);
  /* Map the assignment operator token to the tree code of the
     underlying arithmetic operation; NOP_EXPR means plain "=".
     Any other token means there is no assignment here.  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_EQ:
      code = NOP_EXPR;
      break;
    case CPP_MULT_EQ:
      code = MULT_EXPR;
      break;
    case CPP_DIV_EQ:
      code = TRUNC_DIV_EXPR;
      break;
    case CPP_MOD_EQ:
      code = TRUNC_MOD_EXPR;
      break;
    case CPP_PLUS_EQ:
      code = PLUS_EXPR;
      break;
    case CPP_MINUS_EQ:
      code = MINUS_EXPR;
      break;
    case CPP_LSHIFT_EQ:
      code = LSHIFT_EXPR;
      break;
    case CPP_RSHIFT_EQ:
      code = RSHIFT_EXPR;
      break;
    case CPP_AND_EQ:
      code = BIT_AND_EXPR;
      break;
    case CPP_XOR_EQ:
      code = BIT_XOR_EXPR;
      break;
    case CPP_OR_EQ:
      code = BIT_IOR_EXPR;
      break;
    default:
      /* Not an assignment: the conditional expression is the result.  */
      return lhs;
    }
  c_parser_consume_token (parser);
  /* Assignment is right-associative, hence the recursion.  */
  rhs = c_parser_expr_no_commas (parser, NULL);
  rhs = default_function_array_conversion (rhs);
  ret.value = build_modify_expr (lhs.value, code, rhs.value);
  if (code == NOP_EXPR)
    ret.original_code = MODIFY_EXPR;
  else
    {
      /* Suppress warnings about the value of a compound assignment
	 being used.  */
      TREE_NO_WARNING (ret.value) = 1;
      ret.original_code = ERROR_MARK;
    }
  return ret;
}
/* Parse a conditional expression (C90 6.3.15, C99 6.5.15).  If AFTER
   is not NULL then it is an Objective-C message expression which is
   the primary-expression starting the expression as an initializer.
   conditional-expression:
     logical-OR-expression
     logical-OR-expression ? expression : conditional-expression
   GNU extensions:
   conditional-expression:
     logical-OR-expression ? : conditional-expression
   The skip_evaluation counter is bumped while parsing an arm that a
   constant condition proves dead, and restored before returning.  */
static struct c_expr
c_parser_conditional_expression (c_parser *parser, struct c_expr *after)
{
  struct c_expr cond, exp1, exp2, ret;
  gcc_assert (!after || c_dialect_objc ());
  cond = c_parser_binary_expression (parser, after);
  if (c_parser_next_token_is_not (parser, CPP_QUERY))
    return cond;
  cond = default_function_array_conversion (cond);
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      /* GNU "cond ?: else" — the condition doubles as the middle
	 operand.  */
      if (pedantic)
	pedwarn ("ISO C forbids omitting the middle term of a ?: expression");
      /* Make sure first operand is calculated only once.  */
      exp1.value = save_expr (default_conversion (cond.value));
      cond.value = c_objc_common_truthvalue_conversion (exp1.value);
      /* If the condition is constant true, the else arm is dead.  */
      skip_evaluation += cond.value == truthvalue_true_node;
    }
  else
    {
      cond.value
	= c_objc_common_truthvalue_conversion
	(default_conversion (cond.value));
      /* The then arm is dead if the condition is constant false...  */
      skip_evaluation += cond.value == truthvalue_false_node;
      exp1 = c_parser_expression_conv (parser);
      /* ...and switching to the else arm: dead iff constant true.  */
      skip_evaluation += ((cond.value == truthvalue_true_node)
			  - (cond.value == truthvalue_false_node));
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    {
      skip_evaluation -= cond.value == truthvalue_true_node;
      ret.value = error_mark_node;
      ret.original_code = ERROR_MARK;
      return ret;
    }
  exp2 = c_parser_conditional_expression (parser, NULL);
  exp2 = default_function_array_conversion (exp2);
  skip_evaluation -= cond.value == truthvalue_true_node;
  ret.value = build_conditional_expr (cond.value, exp1.value, exp2.value);
  ret.original_code = ERROR_MARK;
  return ret;
}
/* Parse a binary expression; that is, a logical-OR-expression (C90
   6.3.5-6.3.14, C99 6.5.5-6.5.14).  If AFTER is not NULL then it is
   an Objective-C message expression which is the primary-expression
   starting the expression as an initializer.
   multiplicative-expression:
     cast-expression
     multiplicative-expression * cast-expression
     multiplicative-expression / cast-expression
     multiplicative-expression % cast-expression
   additive-expression:
     multiplicative-expression
     additive-expression + multiplicative-expression
     additive-expression - multiplicative-expression
   shift-expression:
     additive-expression
     shift-expression << additive-expression
     shift-expression >> additive-expression
   relational-expression:
     shift-expression
     relational-expression < shift-expression
     relational-expression > shift-expression
     relational-expression <= shift-expression
     relational-expression >= shift-expression
   equality-expression:
     relational-expression
     equality-expression == relational-expression
     equality-expression != relational-expression
   AND-expression:
     equality-expression
     AND-expression & equality-expression
   exclusive-OR-expression:
     AND-expression
     exclusive-OR-expression ^ AND-expression
   inclusive-OR-expression:
     exclusive-OR-expression
     inclusive-OR-expression | exclusive-OR-expression
   logical-AND-expression:
     inclusive-OR-expression
     logical-AND-expression && inclusive-OR-expression
   logical-OR-expression:
     logical-AND-expression
     logical-OR-expression || logical-AND-expression
*/
static struct c_expr
c_parser_binary_expression (c_parser *parser, struct c_expr *after)
{
  /* A binary expression is parsed using operator-precedence parsing,
     with the operands being cast expressions.  All the binary
     operators are left-associative.  Thus a binary expression is of
     form:
     E0 op1 E1 op2 E2 ...
     which we represent on a stack.  On the stack, the precedence
     levels are strictly increasing.  When a new operator is
     encountered of higher precedence than that at the top of the
     stack, it is pushed; its LHS is the top expression, and its RHS
     is everything parsed until it is popped.  When a new operator is
     encountered with precedence less than or equal to that at the top
     of the stack, triples E[i-1] op[i] E[i] are popped and replaced
     by the result of the operation until the operator at the top of
     the stack has lower precedence than the new operator or there is
     only one element on the stack; then the top expression is the LHS
     of the new operator.  In the case of logical AND and OR
     expressions, we also need to adjust skip_evaluation as
     appropriate when the operators are pushed and popped.  */
  /* The precedence levels, where 0 is a dummy lowest level used for
     the bottom of the stack.  */
  enum prec {
    PREC_NONE,
    PREC_LOGOR,
    PREC_LOGAND,
    PREC_BITOR,
    PREC_BITXOR,
    PREC_BITAND,
    PREC_EQ,
    PREC_REL,
    PREC_SHIFT,
    PREC_ADD,
    PREC_MULT,
    NUM_PRECS
  };
  struct {
    /* The expression at this stack level.  */
    struct c_expr expr;
    /* The precedence of the operator on its left, PREC_NONE at the
       bottom of the stack.  */
    enum prec prec;
    /* The operation on its left.  */
    enum tree_code op;
  } stack[NUM_PRECS];
  int sp;
  /* POP combines the top two stack entries with the operator between
     them, undoing any skip_evaluation bump its push made for a
     short-circuit operator with a constant LHS.  */
#define POP \
  do { \
    switch (stack[sp].op) \
      { \
      case TRUTH_ANDIF_EXPR: \
	skip_evaluation -= stack[sp - 1].expr.value == truthvalue_false_node; \
	break; \
      case TRUTH_ORIF_EXPR: \
	skip_evaluation -= stack[sp - 1].expr.value == truthvalue_true_node; \
	break; \
      default: \
	break; \
      } \
    stack[sp - 1].expr \
      = default_function_array_conversion (stack[sp - 1].expr); \
    stack[sp].expr \
      = default_function_array_conversion (stack[sp].expr); \
    stack[sp - 1].expr = parser_build_binary_op (stack[sp].op, \
						 stack[sp - 1].expr, \
						 stack[sp].expr); \
    sp--; \
  } while (0)
  gcc_assert (!after || c_dialect_objc ());
  stack[0].expr = c_parser_cast_expression (parser, after);
  /* APPLE LOCAL begin radar 4426814 */
  if (c_dialect_objc() && flag_objc_gc)
    /* APPLE LOCAL radar 5276085 */
    stack[0].expr.value = objc_build_weak_reference_tree (stack[0].expr.value);
  /* APPLE LOCAL end radar 4426814 */
  stack[0].prec = PREC_NONE;
  sp = 0;
  while (true)
    {
      enum prec oprec;
      enum tree_code ocode;
      if (parser->error)
	goto out;
      /* Classify the next token as a binary operator, or stop.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT:
	  oprec = PREC_MULT;
	  ocode = MULT_EXPR;
	  break;
	case CPP_DIV:
	  oprec = PREC_MULT;
	  ocode = TRUNC_DIV_EXPR;
	  break;
	case CPP_MOD:
	  oprec = PREC_MULT;
	  ocode = TRUNC_MOD_EXPR;
	  break;
	case CPP_PLUS:
	  oprec = PREC_ADD;
	  ocode = PLUS_EXPR;
	  break;
	case CPP_MINUS:
	  oprec = PREC_ADD;
	  ocode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = RSHIFT_EXPR;
	  break;
	case CPP_LESS:
	  oprec = PREC_REL;
	  ocode = LT_EXPR;
	  break;
	case CPP_GREATER:
	  oprec = PREC_REL;
	  ocode = GT_EXPR;
	  break;
	case CPP_LESS_EQ:
	  oprec = PREC_REL;
	  ocode = LE_EXPR;
	  break;
	case CPP_GREATER_EQ:
	  oprec = PREC_REL;
	  ocode = GE_EXPR;
	  break;
	case CPP_EQ_EQ:
	  oprec = PREC_EQ;
	  ocode = EQ_EXPR;
	  break;
	case CPP_NOT_EQ:
	  oprec = PREC_EQ;
	  ocode = NE_EXPR;
	  break;
	case CPP_AND:
	  oprec = PREC_BITAND;
	  ocode = BIT_AND_EXPR;
	  break;
	case CPP_XOR:
	  oprec = PREC_BITXOR;
	  ocode = BIT_XOR_EXPR;
	  break;
	case CPP_OR:
	  oprec = PREC_BITOR;
	  ocode = BIT_IOR_EXPR;
	  break;
	case CPP_AND_AND:
	  oprec = PREC_LOGAND;
	  ocode = TRUTH_ANDIF_EXPR;
	  break;
	case CPP_OR_OR:
	  oprec = PREC_LOGOR;
	  ocode = TRUTH_ORIF_EXPR;
	  break;
	default:
	  /* Not a binary operator, so end of the binary
	     expression.  */
	  goto out;
	}
      c_parser_consume_token (parser);
      /* Reduce while the new operator does not bind tighter than the
	 top of the stack (left associativity).  */
      while (oprec <= stack[sp].prec)
	POP;
      switch (ocode)
	{
	case TRUTH_ANDIF_EXPR:
	  /* For &&, fold the LHS to a truth value now; if it is
	     constant false the RHS is unevaluated.  */
	  stack[sp].expr
	    = default_function_array_conversion (stack[sp].expr);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (default_conversion (stack[sp].expr.value));
	  skip_evaluation += stack[sp].expr.value == truthvalue_false_node;
	  break;
	case TRUTH_ORIF_EXPR:
	  /* For ||, likewise; constant true means the RHS is
	     unevaluated.  */
	  stack[sp].expr
	    = default_function_array_conversion (stack[sp].expr);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (default_conversion (stack[sp].expr.value));
	  skip_evaluation += stack[sp].expr.value == truthvalue_true_node;
	  break;
	default:
	  break;
	}
      /* Push the new operator and its right operand.  */
      sp++;
      stack[sp].expr = c_parser_cast_expression (parser, NULL);
      /* APPLE LOCAL begin radar 4426814 */
      if (c_dialect_objc() && flag_objc_gc)
	/* APPLE LOCAL radar 5276085 */
	stack[sp].expr.value = objc_build_weak_reference_tree (stack[sp].expr.value);
      /* APPLE LOCAL end radar 4426814 */
      stack[sp].prec = oprec;
      stack[sp].op = ocode;
    }
 out:
  while (sp > 0)
    POP;
  return stack[0].expr;
#undef POP
}
/* Parse a cast expression (C90 6.3.4, C99 6.5.4). If AFTER is not
NULL then it is an Objective-C message expression which is the
primary-expression starting the expression as an initializer.
cast-expression:
unary-expression
( type-name ) unary-expression
*/
static struct c_expr
c_parser_cast_expression (c_parser *parser, struct c_expr *after)
{
  /* AFTER is only supplied on the Objective-C message-expression path.  */
  gcc_assert (!after || c_dialect_objc ());
  if (after)
    return c_parser_postfix_expression_after_primary (parser, *after);
  /* If the expression begins with a parenthesized type name, it may
     be either a cast or a compound literal; we need to see whether
     the next character is '{' to tell the difference.  If not, it is
     a unary expression.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      struct c_type_name *type_name;
      struct c_expr ret;
      struct c_expr expr;
      c_parser_consume_token (parser);
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  /* Malformed type name: yield an error expression.  */
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  return ret;
	}
      /* Save casted types in the function's used types hash table.  */
      used_types_insert (type_name->specs->type);
      /* '{' after the closing paren means a compound literal, not a cast.  */
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	return c_parser_postfix_expression_after_paren_type (parser,
							     type_name);
      /* A true cast: parse the operand recursively, decay it, build the
	 cast expression.  */
      expr = c_parser_cast_expression (parser, NULL);
      expr = default_function_array_conversion (expr);
      ret.value = c_cast_expr (type_name, expr.value);
      ret.original_code = ERROR_MARK;
      return ret;
    }
  else
    return c_parser_unary_expression (parser);
}
/* Parse a unary expression (C90 6.3.3, C99 6.5.3).
unary-expression:
postfix-expression
++ unary-expression
-- unary-expression
unary-operator cast-expression
sizeof unary-expression
sizeof ( type-name )
unary-operator: one of
& * + - ~ !
GNU extensions:
unary-expression:
__alignof__ unary-expression
__alignof__ ( type-name )
&& identifier
unary-operator: one of
__extension__ __real__ __imag__
In addition, the GNU syntax treats ++ and -- as unary operators, so
they may be applied to cast expressions with errors for non-lvalues
given later. */
static struct c_expr
c_parser_unary_expression (c_parser *parser)
{
  int ext;
  struct c_expr ret, op;
  /* Dispatch on the leading token; each prefix-operator case consumes
     the operator, parses a cast-expression operand, and builds the
     corresponding tree node.  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_PLUS_PLUS:
      /* Preincrement: ++ unary-expression.  */
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      return parser_build_unary_op (PREINCREMENT_EXPR, op);
    case CPP_MINUS_MINUS:
      /* Predecrement: -- unary-expression.  */
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      return parser_build_unary_op (PREDECREMENT_EXPR, op);
    case CPP_AND:
      /* Address-of; note the operand is NOT decayed here.  */
      c_parser_consume_token (parser);
      return parser_build_unary_op (ADDR_EXPR,
				    c_parser_cast_expression (parser, NULL));
    case CPP_MULT:
      /* Indirection (unary *).  */
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      ret.value = build_indirect_ref (op.value, "unary *");
      ret.original_code = ERROR_MARK;
      return ret;
    case CPP_PLUS:
      c_parser_consume_token (parser);
      if (!c_dialect_objc () && !in_system_header)
	warning (OPT_Wtraditional,
		 "traditional C rejects the unary plus operator");
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      return parser_build_unary_op (CONVERT_EXPR, op);
    case CPP_MINUS:
      /* Arithmetic negation.  */
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      return parser_build_unary_op (NEGATE_EXPR, op);
    case CPP_COMPL:
      /* Bitwise complement (~).  */
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      return parser_build_unary_op (BIT_NOT_EXPR, op);
    case CPP_NOT:
      /* Logical negation (!).  */
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      op = default_function_array_conversion (op);
      return parser_build_unary_op (TRUTH_NOT_EXPR, op);
    case CPP_AND_AND:
      /* Refer to the address of a label as a pointer (GNU &&label).  */
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  ret.value = finish_label_address_expr
	    (c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	}
      else
	{
	  c_parser_error (parser, "expected identifier");
	  ret.value = error_mark_node;
	}
      ret.original_code = ERROR_MARK;
      return ret;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_SIZEOF:
	  return c_parser_sizeof_expression (parser);
	case RID_ALIGNOF:
	  return c_parser_alignof_expression (parser);
	case RID_EXTENSION:
	  /* __extension__: parse the operand with extension
	     diagnostics disabled, then restore them.  */
	  c_parser_consume_token (parser);
	  ext = disable_extension_diagnostics ();
	  ret = c_parser_cast_expression (parser, NULL);
	  restore_extension_diagnostics (ext);
	  return ret;
	case RID_REALPART:
	  /* __real__ operand.  */
	  c_parser_consume_token (parser);
	  op = c_parser_cast_expression (parser, NULL);
	  op = default_function_array_conversion (op);
	  return parser_build_unary_op (REALPART_EXPR, op);
	case RID_IMAGPART:
	  /* __imag__ operand.  */
	  c_parser_consume_token (parser);
	  op = c_parser_cast_expression (parser, NULL);
	  op = default_function_array_conversion (op);
	  return parser_build_unary_op (IMAGPART_EXPR, op);
	default:
	  /* Any other keyword starts a postfix expression (or is an
	     error diagnosed there).  */
	  return c_parser_postfix_expression (parser);
	}
    default:
      return c_parser_postfix_expression (parser);
    }
}
/* Parse a sizeof expression. */
static struct c_expr
c_parser_sizeof_expression (c_parser *parser)
{
  struct c_expr expr;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_SIZEOF));
  c_parser_consume_token (parser);
  /* The operand of sizeof is unevaluated (C99 6.5.3.4); bump the
     counters so nested parsing suppresses evaluation and records that
     we are inside sizeof.  Every return path below decrements both.  */
  skip_evaluation++;
  in_sizeof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either sizeof ( type-name ) or sizeof unary-expression
	 starting with a compound literal.  */
      struct c_type_name *type_name;
      c_parser_consume_token (parser);
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  /* Malformed type name: error expression.  */
	  struct c_expr ret;
	  skip_evaluation--;
	  in_sizeof--;
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* Compound literal: treat it as sizeof-on-expression via the
	     shared exit path in the else branch.  */
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name);
	  goto sizeof_expr;
	}
      /* sizeof ( type-name ).  */
      skip_evaluation--;
      in_sizeof--;
      if (type_name->declarator->kind == cdk_array
	  && type_name->declarator->u.array.vla_unspec_p)
	{
	  /* C99 6.7.5.2p4 */
	  error ("%<[*]%> not allowed in other than a declaration");
	}
      return c_expr_sizeof_type (type_name);
    }
  else
    {
      expr = c_parser_unary_expression (parser);
    sizeof_expr:
      skip_evaluation--;
      in_sizeof--;
      /* sizeof cannot be applied to a bit-field (C99 6.5.3.4p1).  */
      if (TREE_CODE (expr.value) == COMPONENT_REF
	  && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1)))
	error ("%<sizeof%> applied to a bit-field");
      return c_expr_sizeof_expr (expr);
    }
}
/* Parse an alignof expression. */
static struct c_expr
c_parser_alignof_expression (c_parser *parser)
{
  struct c_expr expr;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNOF));
  c_parser_consume_token (parser);
  /* Like sizeof, the operand of __alignof__ is unevaluated; every
     return path below decrements both counters.  */
  skip_evaluation++;
  in_alignof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either __alignof__ ( type-name ) or __alignof__
	 unary-expression starting with a compound literal.  */
      struct c_type_name *type_name;
      struct c_expr ret;
      c_parser_consume_token (parser);
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  /* Malformed type name: error expression.  (This block
	     previously re-declared RET, needlessly shadowing the one
	     declared above; use the outer declaration.)  */
	  skip_evaluation--;
	  in_alignof--;
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* Compound literal: treat as alignof-on-expression via the
	     shared exit path in the else branch.  */
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name);
	  goto alignof_expr;
	}
      /* alignof ( type-name ).  */
      skip_evaluation--;
      in_alignof--;
      ret.value = c_alignof (groktypename (type_name));
      ret.original_code = ERROR_MARK;
      return ret;
    }
  else
    {
      struct c_expr ret;
      expr = c_parser_unary_expression (parser);
    alignof_expr:
      skip_evaluation--;
      in_alignof--;
      ret.value = c_alignof_expr (expr.value);
      ret.original_code = ERROR_MARK;
      return ret;
    }
}
/* Parse a postfix expression (C90 6.3.1-6.3.2, C99 6.5.1-6.5.2).
postfix-expression:
primary-expression
postfix-expression [ expression ]
postfix-expression ( argument-expression-list[opt] )
postfix-expression . identifier
APPLE LOCAL CW asm blocks
typedef-name . identifier
postfix-expression -> identifier
postfix-expression ++
postfix-expression --
( type-name ) { initializer-list }
( type-name ) { initializer-list , }
APPLE LOCAL CW asm blocks
primary-expression PTR postfix-expression
argument-expression-list:
argument-expression
argument-expression-list , argument-expression
primary-expression:
APPLE LOCAL CW asm blocks
.
identifier
APPLE LOCAL CW asm blocks
@identifier
constant
string-literal
( expression )
APPLE LOCAL CW asm blocks
[ expression ]
GNU extensions:
primary-expression:
__func__
(treated as a keyword in GNU C)
__FUNCTION__
__PRETTY_FUNCTION__
( compound-statement )
__builtin_va_arg ( assignment-expression , type-name )
__builtin_offsetof ( type-name , offsetof-member-designator )
__builtin_choose_expr ( assignment-expression ,
assignment-expression ,
assignment-expression )
__builtin_types_compatible_p ( type-name , type-name )
offsetof-member-designator:
identifier
offsetof-member-designator . identifier
offsetof-member-designator [ expression ]
Objective-C:
primary-expression:
[ objc-receiver objc-message-args ]
@selector ( objc-selector-arg )
@protocol ( identifier )
@encode ( type-name )
objc-string-literal
*/
static struct c_expr
c_parser_postfix_expression (c_parser *parser)
{
  struct c_expr expr, e1, e2, e3;
  struct c_type_name *t1, *t2;
  /* Parse one primary expression, then hand off to
     c_parser_postfix_expression_after_primary for any trailing
     postfix operators.  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_NUMBER:
      /* APPLE LOCAL begin CW asm blocks (in 4.2 ak) */
      if (cpp_get_options (parse_in)->h_suffix
	  && c_parser_peek_token (parser)->value == error_mark_node)
	{
	  /* This was previously deferred.  */
	  cpp_error (parse_in, CPP_DL_ERROR, "invalid suffix on integer constant");
	  c_parser_consume_token (parser);
	}
      /* APPLE LOCAL end CW asm blocks (in 4.2 ak) */
      /* Falls through: constants share the handling below.  */
    case CPP_CHAR:
    case CPP_WCHAR:
      expr.value = c_parser_peek_token (parser)->value;
      expr.original_code = ERROR_MARK;
      c_parser_consume_token (parser);
      break;
    case CPP_STRING:
    case CPP_WSTRING:
      /* Record STRING_CST so later code can tell this came from a
	 literal.  */
      expr.value = c_parser_peek_token (parser)->value;
      expr.original_code = STRING_CST;
      c_parser_consume_token (parser);
      break;
    case CPP_OBJC_STRING:
      /* Objective-C @"..." string object.  */
      gcc_assert (c_dialect_objc ());
      expr.value
	= objc_build_string_object (c_parser_peek_token (parser)->value);
      expr.original_code = ERROR_MARK;
      c_parser_consume_token (parser);
      break;
    case CPP_NAME:
      /* APPLE LOCAL begin radar 5277239 */
      if (c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME
	  && c_parser_peek_2nd_token (parser)->type == CPP_DOT)
	{
	  /* CLASS.class_method expression.  */
	  tree receiver, component;
	  receiver = c_parser_objc_receiver (parser);
	  /* consume '.' operator */
	  c_parser_consume_token (parser);
	  component = c_parser_objc_message_args (parser);
	  expr.value = objc_build_property_reference_expr (receiver, component);
	  expr.original_code = ERROR_MARK;
	  break;
	}
      /* APPLE LOCAL end radar 5277239 */
      if (c_parser_peek_token (parser)->id_kind != C_ID_ID)
	{
	  /* APPLE LOCAL begin CW asm blocks (in 4.2 bf) */
	  if (inside_iasm_block
	      && c_parser_peek_2nd_token (parser)->type == CPP_DOT)
	    {
	      expr.value = c_parser_peek_token (parser)->value;
	      expr.original_code = ERROR_MARK;
	      c_parser_consume_token (parser);
	      break;
	    }
	  /* APPLE LOCAL end CW asm blocks (in 4.2 bf) */
	  c_parser_error (parser, "expected expression");
	  expr.value = error_mark_node;
	  expr.original_code = ERROR_MARK;
	  break;
	}
      {
	/* An ordinary identifier reference; the lookahead for '(' lets
	   build_external_ref know whether this is a call.  */
	tree id = c_parser_peek_token (parser)->value;
	location_t loc = c_parser_peek_token (parser)->location;
	c_parser_consume_token (parser);
	expr.value = build_external_ref (id,
					 (c_parser_peek_token (parser)->type
					  == CPP_OPEN_PAREN), loc);
	expr.original_code = ERROR_MARK;
      }
      break;
    case CPP_OPEN_PAREN:
      /* A parenthesized expression, statement expression or compound
	 literal.  */
      if (c_parser_peek_2nd_token (parser)->type == CPP_OPEN_BRACE)
	{
	  /* A statement expression.  */
	  tree stmt;
	  c_parser_consume_token (parser);
	  c_parser_consume_token (parser);
	  if (cur_stmt_list == NULL)
	    {
	      error ("braced-group within expression allowed "
		     "only inside a function");
	      parser->error = true;
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  stmt = c_begin_stmt_expr ();
	  c_parser_compound_statement_nostart (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  if (pedantic)
	    pedwarn ("ISO C forbids braced-groups within expressions");
	  expr.value = c_finish_stmt_expr (stmt);
	  expr.original_code = ERROR_MARK;
	}
      else if (c_token_starts_typename (c_parser_peek_2nd_token (parser)))
	{
	  /* A compound literal.  ??? Can we actually get here rather
	     than going directly to
	     c_parser_postfix_expression_after_paren_type from
	     elsewhere?  */
	  struct c_type_name *type_name;
	  c_parser_consume_token (parser);
	  type_name = c_parser_type_name (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  if (type_name == NULL)
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	    }
	  else
	    expr = c_parser_postfix_expression_after_paren_type (parser,
								 type_name);
	}
      else
	{
	  /* A parenthesized expression.  */
	  c_parser_consume_token (parser);
	  expr = c_parser_expression (parser);
	  /* Parenthesization suppresses the "assignment used as truth
	     value" style warning on the enclosed assignment.  */
	  if (TREE_CODE (expr.value) == MODIFY_EXPR)
	    TREE_NO_WARNING (expr.value) = 1;
	  expr.original_code = ERROR_MARK;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	}
      break;
    case CPP_KEYWORD:
      /* Keyword-introduced primaries: __func__ family and the
	 builtin/Objective-C @-expressions.  */
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_FUNCTION_NAME:
	case RID_PRETTY_FUNCTION_NAME:
	case RID_C99_FUNCTION_NAME:
	  expr.value = fname_decl (c_parser_peek_token (parser)->keyword,
				   c_parser_peek_token (parser)->value);
	  expr.original_code = ERROR_MARK;
	  c_parser_consume_token (parser);
	  break;
	case RID_VA_ARG:
	  /* __builtin_va_arg ( assignment-expression , type-name ).  */
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  e1 = c_parser_expr_no_commas (parser, NULL);
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	    }
	  else
	    {
	      expr.value = build_va_arg (e1.value, groktypename (t1));
	      expr.original_code = ERROR_MARK;
	    }
	  break;
	case RID_OFFSETOF:
	  /* __builtin_offsetof ( type-name , member-designator ).  */
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  {
	    tree type = groktypename (t1);
	    tree offsetof_ref;
	    if (type == error_mark_node)
	      offsetof_ref = error_mark_node;
	    else
	      offsetof_ref = build1 (INDIRECT_REF, type, null_pointer_node);
	    /* Parse the second argument to __builtin_offsetof.  We
	       must have one identifier, and beyond that we want to
	       accept sub structure and sub array references.  */
	    if (c_parser_next_token_is (parser, CPP_NAME))
	      {
		offsetof_ref = build_component_ref
		  (offsetof_ref, c_parser_peek_token (parser)->value);
		c_parser_consume_token (parser);
		while (c_parser_next_token_is (parser, CPP_DOT)
		       || c_parser_next_token_is (parser,
						  CPP_OPEN_SQUARE))
		  {
		    if (c_parser_next_token_is (parser, CPP_DOT))
		      {
			c_parser_consume_token (parser);
			if (c_parser_next_token_is_not (parser,
							CPP_NAME))
			  {
			    c_parser_error (parser, "expected identifier");
			    break;
			  }
			offsetof_ref = build_component_ref
			  (offsetof_ref,
			   c_parser_peek_token (parser)->value);
			c_parser_consume_token (parser);
		      }
		    else
		      {
			tree idx;
			c_parser_consume_token (parser);
			idx = c_parser_expression (parser).value;
			c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
						   "expected %<]%>");
			offsetof_ref = build_array_ref (offsetof_ref, idx);
		      }
		  }
	      }
	    else
	      c_parser_error (parser, "expected identifier");
	    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				       "expected %<)%>");
	    expr.value = fold_offsetof (offsetof_ref, NULL_TREE);
	    expr.original_code = ERROR_MARK;
	  }
	  break;
	case RID_CHOOSE_EXPR:
	  /* __builtin_choose_expr ( e1 , e2 , e3 ): selects e2 or e3
	     by the constant e1.  */
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  e1 = c_parser_expr_no_commas (parser, NULL);
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  e2 = c_parser_expr_no_commas (parser, NULL);
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  e3 = c_parser_expr_no_commas (parser, NULL);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  {
	    tree c;
	    c = fold (e1.value);
	    if (TREE_CODE (c) != INTEGER_CST)
	      error ("first argument to %<__builtin_choose_expr%> not"
		     " a constant");
	    expr = integer_zerop (c) ? e3 : e2;
	  }
	  break;
	case RID_TYPES_COMPATIBLE_P:
	  /* __builtin_types_compatible_p ( type-name , type-name ).  */
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  t2 = c_parser_type_name (parser);
	  if (t2 == NULL)
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  {
	    /* Compare main variants, yielding integer 1 or 0.  */
	    tree e1, e2;
	    e1 = TYPE_MAIN_VARIANT (groktypename (t1));
	    e2 = TYPE_MAIN_VARIANT (groktypename (t2));
	    expr.value = comptypes (e1, e2)
	      ? build_int_cst (NULL_TREE, 1)
	      : build_int_cst (NULL_TREE, 0);
	    expr.original_code = ERROR_MARK;
	  }
	  break;
	case RID_AT_SELECTOR:
	  /* @selector ( objc-selector-arg ).  */
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  {
	    tree sel = c_parser_objc_selector_arg (parser);
	    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				       "expected %<)%>");
	    expr.value = objc_build_selector_expr (sel);
	    expr.original_code = ERROR_MARK;
	  }
	  break;
	case RID_AT_PROTOCOL:
	  /* @protocol ( identifier ).  */
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  {
	    tree id = c_parser_peek_token (parser)->value;
	    c_parser_consume_token (parser);
	    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				       "expected %<)%>");
	    expr.value = objc_build_protocol_expr (id);
	    expr.original_code = ERROR_MARK;
	  }
	  break;
	case RID_AT_ENCODE:
	  /* Extension to support C-structures in the archiver.  */
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      break;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  {
	    tree type = groktypename (t1);
	    expr.value = objc_build_encode_expr (type);
	    expr.original_code = ERROR_MARK;
	  }
	  break;
	default:
	  c_parser_error (parser, "expected expression");
	  expr.value = error_mark_node;
	  expr.original_code = ERROR_MARK;
	  break;
	}
      break;
    case CPP_OPEN_SQUARE:
      /* APPLE LOCAL begin CW asm blocks */
      if (inside_iasm_block)
	{
	  c_parser_consume_token (parser);
	  expr = c_parser_expression (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  expr.value = iasm_build_bracket (expr.value, NULL_TREE);
	  expr.original_code = ERROR_MARK;
	  break;
	}
      /* APPLE LOCAL end CW asm blocks */
      if (c_dialect_objc ())
	{
	  /* Objective-C message expression [receiver args].  */
	  tree receiver, args;
	  c_parser_consume_token (parser);
	  receiver = c_parser_objc_receiver (parser);
	  args = c_parser_objc_message_args (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  expr.value = objc_build_message_expr (build_tree_list (receiver,
								 args));
	  expr.original_code = ERROR_MARK;
	  break;
	}
      /* Else fall through to report error.  */
    default:
      /* APPLE LOCAL begin CW asm blocks */
      if (inside_iasm_block)
	{
	  if (c_parser_next_token_is (parser, CPP_DOT))
	    {
	      /* (in 4.2 ba) */
	      c_parser_consume_token (parser);
	      expr.value = get_identifier (".");
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  /* (in 4.2 be) */
	  if (c_parser_next_token_is (parser, CPP_ATSIGN))
	    {
	      tree id;
	      location_t loc = c_parser_peek_token (parser)->location;
	      c_parser_consume_token (parser);
	      if (c_parser_peek_token (parser)->id_kind != C_ID_ID)
		{
		  c_parser_error (parser, "expected identifier");
		  expr.value = error_mark_node;
		  expr.original_code = ERROR_MARK;
		  break;
		}
	      id = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	      id = prepend_char_identifier (id, '@');
	      expr.value = build_external_ref (id,
					       (c_parser_peek_token (parser)->type
						== CPP_OPEN_PAREN), loc);
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	}
      /* APPLE LOCAL end CW asm blocks */
      c_parser_error (parser, "expected expression");
      expr.value = error_mark_node;
      expr.original_code = ERROR_MARK;
      break;
    }
  /* Parse any trailing postfix operators ([] () . -> ++ --).  */
  return c_parser_postfix_expression_after_primary (parser, expr);
}
/* Parse a postfix expression after a parenthesized type name: the
brace-enclosed initializer of a compound literal, possibly followed
by some postfix operators. This is separate because it is not
possible to tell until after the type name whether a cast
expression has a cast or a compound literal, or whether the operand
of sizeof is a parenthesized type name or starts with a compound
literal. */
static struct c_expr
c_parser_postfix_expression_after_paren_type (c_parser *parser,
					      struct c_type_name *type_name)
{
  tree type;
  struct c_expr init;
  struct c_expr expr;
  /* start_init/finish_init bracket the initializer machinery used by
     c_parser_braced_init below.  */
  start_init (NULL_TREE, NULL, 0);
  type = groktypename (type_name);
  if (type != error_mark_node && C_TYPE_VARIABLE_SIZE (type))
    {
      /* A compound literal may not have a variably modified type.  */
      error ("compound literal has variable size");
      type = error_mark_node;
    }
  init = c_parser_braced_init (parser, type, false);
  finish_init ();
  maybe_warn_string_init (type, init);
  /* APPLE LOCAL AltiVec (in 4.2 o) */
  if (pedantic && TREE_CODE (type) != VECTOR_TYPE && !flag_isoc99)
    pedwarn ("ISO C90 forbids compound literals");
  expr.value = build_compound_literal (type, init.value);
  expr.original_code = ERROR_MARK;
  /* The literal may itself be followed by postfix operators.  */
  return c_parser_postfix_expression_after_primary (parser, expr);
}
/* Parse a postfix expression after the initial primary or compound
literal; that is, parse a series of postfix operators. */
static struct c_expr
c_parser_postfix_expression_after_primary (c_parser *parser,
					   struct c_expr expr)
{
  tree ident, idx, exprlist;
  /* Repeatedly consume postfix operators, folding each one into EXPR,
     until the next token is not a postfix operator.  */
  while (true)
    {
      /* APPLE LOCAL begin CW asm blocks */
      if (inside_iasm_block
	  && c_parser_iasm_bol (parser))
	return expr;
      /* APPLE LOCAL end CW asm blocks */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_OPEN_SQUARE:
	  /* Array reference.  */
	  c_parser_consume_token (parser);
	  idx = c_parser_expression (parser).value;
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  expr.value = build_array_ref (expr.value, idx);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_OPEN_PAREN:
	  /* APPLE LOCAL begin CW asm blocks (in 4.2 bd) */
	  if (inside_iasm_block)
	    return expr;
	  /* APPLE LOCAL end CW asm blocks (in 4.2 bd) */
	  /* Function call.  */
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    exprlist = NULL_TREE;
	  else
	    exprlist = c_parser_expr_list (parser, true);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  expr.value = build_function_call (expr.value, exprlist);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_DOT:
	  /* Structure element reference.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr);
	  /* APPLE LOCAL begin CW asm blocks */
	  if (inside_iasm_block)
	    {
	      /* (in 4.2 bf) */
	      if (c_parser_next_token_is (parser, CPP_NAME)
		  /* (in 4.2 bc) */
		  || c_parser_next_token_is (parser, CPP_NUMBER))
		{
		  tree c = c_parser_peek_token (parser)->value;
		  c_parser_consume_token (parser);
		  expr.value = iasm_c_build_component_ref (expr.value, c);
		  expr.original_code = ERROR_MARK;
		  break;
		}
	    }
	  /* APPLE LOCAL end CW asm blocks */
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    ident = c_parser_peek_token (parser)->value;
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      return expr;
	    }
	  c_parser_consume_token (parser);
	  expr.value = build_component_ref (expr.value, ident);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_DEREF:
	  /* Structure element reference via pointer (->); built as an
	     indirection followed by a component reference.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    ident = c_parser_peek_token (parser)->value;
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      return expr;
	    }
	  c_parser_consume_token (parser);
	  expr.value = build_component_ref (build_indirect_ref (expr.value,
								"->"), ident);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_PLUS_PLUS:
	  /* Postincrement.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr);
	  expr.value = build_unary_op (POSTINCREMENT_EXPR, expr.value, 0);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_MINUS_MINUS:
	  /* Postdecrement.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr);
	  expr.value = build_unary_op (POSTDECREMENT_EXPR, expr.value, 0);
	  expr.original_code = ERROR_MARK;
	  break;
	/* APPLE LOCAL begin CW asm blocks (in 4.2 bb) */
	case CPP_NAME:
	  if (inside_iasm_block)
	    {
	      /* "expr PTR expr" syntax inside an asm block.  */
	      tree id = c_parser_peek_token (parser)->value;
	      struct c_expr e2;
	      if (strcasecmp (IDENTIFIER_POINTER (id), "ptr") == 0)
		{
		  c_parser_consume_token (parser);
		  e2 = c_parser_postfix_expression (parser);
		  expr.value = iasm_ptr_conv (expr.value, e2.value);
		  expr.original_code = ERROR_MARK;
		}
	    }
	  return expr;
	/* APPLE LOCAL end CW asm blocks */
	default:
	  return expr;
	}
    }
}
/* Parse an expression (C90 6.3.17, C99 6.5.17).
expression:
assignment-expression
expression , assignment-expression
*/
static struct c_expr
c_parser_expression (c_parser *parser)
{
  /* Parse the leading assignment-expression, then fold each
     comma-separated operand into a COMPOUND_EXPR, left to right.  */
  struct c_expr result;
  result = c_parser_expr_no_commas (parser, NULL);
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      struct c_expr rhs;
      c_parser_consume_token (parser);
      rhs = c_parser_expr_no_commas (parser, NULL);
      rhs = default_function_array_conversion (rhs);
      result.value = build_compound_expr (result.value, rhs.value);
      result.original_code = COMPOUND_EXPR;
    }
  return result;
}
/* Parse an expression and convert functions or arrays to
pointers. */
static struct c_expr
c_parser_expression_conv (c_parser *parser)
{
  /* Parse a full expression and apply the usual function/array
     decay to its result.  */
  return default_function_array_conversion (c_parser_expression (parser));
}
/* Parse a non-empty list of expressions. If CONVERT_P, convert
functions and arrays to pointers.
nonempty-expr-list:
assignment-expression
nonempty-expr-list , assignment-expression
*/
static tree
c_parser_expr_list (c_parser *parser, bool convert_p)
{
  /* Build a TREE_LIST of the comma-separated assignment-expressions,
     keeping a tail pointer so appending stays O(1).  If CONVERT_P,
     apply function/array decay to each element.  */
  tree head, tail;
  struct c_expr entry;
  entry = c_parser_expr_no_commas (parser, NULL);
  if (convert_p)
    entry = default_function_array_conversion (entry);
  head = tail = build_tree_list (NULL_TREE, entry.value);
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);
      entry = c_parser_expr_no_commas (parser, NULL);
      if (convert_p)
	entry = default_function_array_conversion (entry);
      TREE_CHAIN (tail) = build_tree_list (NULL_TREE, entry.value);
      tail = TREE_CHAIN (tail);
    }
  return head;
}
/* Parse Objective-C-specific constructs. */
/* Parse an objc-class-definition.
objc-class-definition:
@interface identifier objc-superclass[opt] objc-protocol-refs[opt]
objc-class-instance-variables[opt] objc-methodprotolist @end
@implementation identifier objc-superclass[opt]
objc-class-instance-variables[opt]
@interface identifier ( identifier ) objc-protocol-refs[opt]
objc-methodprotolist @end
@implementation identifier ( identifier )
objc-superclass:
: identifier
"@interface identifier (" must start "@interface identifier (
identifier ) ...": objc-methodprotolist in the first production may
not start with a parenthesized identifier as a declarator of a data
definition with no declaration specifiers if the objc-superclass,
objc-protocol-refs and objc-class-instance-variables are omitted. */
static void
/* APPLE LOCAL radar 4548636 - class attributes. */
c_parser_objc_class_definition (c_parser *parser, tree prefix_attrs)
{
  /* IFACE_P distinguishes @interface (true) from @implementation.  */
  bool iface_p;
  tree id1;
  tree superclass;
  if (c_parser_next_token_is_keyword (parser, RID_AT_INTERFACE))
    iface_p = true;
  else if (c_parser_next_token_is_keyword (parser, RID_AT_IMPLEMENTATION))
    /* APPLE LOCAL begin radar 4548636 - class attributes. */
    {
      if (prefix_attrs)
	{
	  error ("attributes may not be specified on an implementation");
	  prefix_attrs = NULL_TREE;
	}
      iface_p = false;
    }
  /* APPLE LOCAL end radar 4548636 - class attributes. */
  else
    gcc_unreachable ();
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      /* APPLE LOCAL radar 4533974 - ObjC new protocol (in 4.2 v) */
      c_parser_error (parser, "expected identifier or protocol references");
      return;
    }
  id1 = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* A category: @interface/@implementation identifier ( identifier ).  */
      /* APPLE LOCAL radar 4965989 */
      tree id2 = NULL_TREE;
      tree proto = NULL_TREE;
      c_parser_consume_token (parser);
      /* APPLE LOCAL begin radar 4965989 */
      if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN))
	{
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      return;
	    }
	  id2 = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	}
      /* APPLE LOCAL end radar 4965989 */
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (!iface_p)
	{
	  /* APPLE LOCAL begin radar 4965989 */
	  /* An empty "()" (anonymous category) is only legal on an
	     interface, not an implementation.  */
	  if (id2 == NULL_TREE)
	    {
	      error ("cannot implement anonymous category");
	      return;
	    }
	  /* APPLE LOCAL end radar 4965989 */
	  objc_start_category_implementation (id1, id2);
	  return;
	}
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      /* APPLE LOCAL begin radar 4548636 - class attributes. */
      if (prefix_attrs)
	error ("attributes may not be specified on a category");
      /* APPLE LOCAL end radar 4548636 - class attributes. */
      objc_start_category_interface (id1, id2, proto);
      /* APPLE LOCAL C* property (Radar 4436866) (in 4.2 q) */
      c_parser_objc_interfacedecllist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_finish_interface ();
      return;
    }
  /* Not a category: optional ": superclass".  */
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  return;
	}
      superclass = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else
    superclass = NULL_TREE;
  if (iface_p)
    {
      tree proto = NULL_TREE;
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      /* APPLE LOCAL radar 4548636 - class attributes. */
      objc_start_class_interface (id1, superclass, proto, prefix_attrs);
    }
  else
    objc_start_class_implementation (id1, superclass);
  /* Optional instance-variable block.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    c_parser_objc_class_instance_variables (parser);
  if (iface_p)
    {
      /* Interface body runs to @end; implementations continue to be
	 parsed by the caller.  */
      objc_continue_interface ();
      /* APPLE LOCAL C* property (Radar 4436866) (in 4.2 q) */
      c_parser_objc_interfacedecllist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_finish_interface ();
    }
  else
    {
      objc_continue_implementation ();
      return;
    }
}
/* APPLE LOCAL begin C* property (Radar 4436866) (in 4.2 s) */
/* Parse an "= identifier" pair, as used by the getter/setter property
   attributes and by @synthesize.  On success, consume both tokens and
   return the identifier's tree; on a missing '=' or identifier, emit a
   parse error and return NULL_TREE.  */
static tree
c_parser_objc_eq_identifier (c_parser *parser)
{
  tree name = NULL_TREE;

  if (c_parser_next_token_is (parser, CPP_EQ))
    {
      /* Consume '='.  */
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  name = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	}
      else
	c_parser_error (parser, "expected identifier");
    }
  else
    c_parser_error (parser, "expected %<=%>");
  return name;
}
/* Parse obj-property-attribute.

   Recognizes one property attribute keyword -- readonly, getter=name,
   setter=name:, readwrite, assign, retain, copy, nonatomic -- and records
   it via objc_set_property_attr with a small integer code (the codes are
   interpreted by the ObjC front end).  On anything else, emits a parse
   error and consumes one token so the caller's loop makes progress.  */
static void
c_parser_objc_property_attribute (c_parser *parser)
{
  tree id;
  if (c_parser_peek_token (parser)->type != CPP_KEYWORD)
    {
      c_parser_error (parser, "expected a property attribute");
      c_parser_consume_token (parser);
      return;
    }
  switch (c_parser_peek_token (parser)->keyword)
    {
    case RID_READONLY:
      c_parser_consume_token (parser);
      /* Code 1: readonly.  */
      objc_set_property_attr (1, NULL_TREE);
      break;
    case RID_GETTER:
      c_parser_consume_token (parser);
      /* Code 2 carries the getter name ("getter = identifier").  */
      id = c_parser_objc_eq_identifier (parser);
      if (id)
	objc_set_property_attr (2, id);
      break;
    case RID_SETTER:
      c_parser_consume_token (parser);
      /* Code 3 carries the setter name ("setter = identifier:").  */
      id = c_parser_objc_eq_identifier (parser);
      if (id)
	objc_set_property_attr (3, id);
      /* Consume the ':' which must always follow the setter name.  */
      if (c_parser_next_token_is (parser, CPP_COLON))
	c_parser_consume_token (parser);
      break;
    /* APPLE LOCAL begin objc new property */
    case RID_READWRITE:
      c_parser_consume_token (parser);
      /* Code 9: readwrite.  */
      objc_set_property_attr (9, NULL_TREE);
      break;
    case RID_ASSIGN:
      c_parser_consume_token (parser);
      /* Code 10: assign.  */
      objc_set_property_attr (10, NULL_TREE);
      break;
    case RID_RETAIN:
      c_parser_consume_token (parser);
      /* Code 11: retain.  */
      objc_set_property_attr (11, NULL_TREE);
      break;
    case RID_COPY:
      c_parser_consume_token (parser);
      /* Code 12: copy.  */
      objc_set_property_attr (12, NULL_TREE);
      break;
    /* APPLE LOCAL end objc new property */
    /* APPLE LOCAL begin radar 4947014 - objc atomic property */
    case RID_NONATOMIC:
      c_parser_consume_token (parser);
      /* Code 13: nonatomic.  */
      objc_set_property_attr (13, NULL_TREE);
      break;
    /* APPLE LOCAL end radar 4947014 - objc atomic property */
    default:
      c_parser_error (parser, "expected a property attribute");
      c_parser_consume_token (parser);
    }
}
/* Parse a comma-separated list of property attributes, stopping at the
   closing parenthesis (or EOF so a malformed list cannot loop forever).
   A stray identifier after an attribute is consumed as error recovery.  */
static void
c_parser_objc_property_attrlist (c_parser *parser)
{
  for (;;)
    {
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)
	  || c_parser_next_token_is (parser, CPP_EOF))
	break;
      c_parser_objc_property_attribute (parser);
      /* Skip the separating ',' -- or a stray identifier, so the loop
	 always makes progress on bad input.  */
      if (c_parser_next_token_is (parser, CPP_COMMA)
	  || c_parser_next_token_is (parser, CPP_NAME))
	c_parser_consume_token (parser);
    }
}
/* Parse an optional parenthesized property-attribute list:
     ( obj-property-attribute-list )
   If the next token is not '(', there is no attribute declaration and
   nothing is consumed.  */
static void
c_parser_objc_property_attr_decl (c_parser *parser)
{
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);
      c_parser_objc_property_attrlist (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
}
/* Parse the declaration part of an @property: syntactically a
   struct-declaration, which may declare several comma-separated
   properties at once.  */
static tree
c_parser_component_decl (c_parser *parser)
{
  return c_parser_struct_declaration (parser);
}
/* Parse an @property declaration:
     @property ( attribute-list[opt] ) struct-declaration ;
   Attributes are parsed first (with objc_property_attr_context set so
   attribute keywords are recognized), then the declaration itself; each
   declared property is registered individually with the ObjC front end.  */
static void
c_parser_objc_property_declaration (c_parser *parser)
{
  tree prop;
  c_parser_require_keyword (parser, RID_AT_PROPERTY, "expected %<@property%>");
  objc_property_attr_context = 1;
  /* Code 0 apparently clears attribute state from a previous @property
     -- NOTE(review): confirm against objc_set_property_attr.  */
  objc_set_property_attr (0, NULL_TREE);
  c_parser_objc_property_attr_decl (parser);
  objc_property_attr_context = 0;
  prop = c_parser_component_decl (parser);
  /* Comma-separated properties are chained together in
     reverse order; add them one by one.  */
  prop = nreverse (prop);
  for (; prop; prop = TREE_CHAIN (prop))
    objc_add_property_variable (copy_node (prop));
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* APPLE LOCAL end C* property (Radar 4436866) (in 4.2 s) */
/* APPLE LOCAL begin objc new property */
/* Parse an @synthesize property-implementation declaration:
     @synthesize property[=ivar][, property[=ivar]]... ;
   Each entry becomes a TREE_LIST node whose purpose is the optional
   instance-variable name and whose value is the property name; the
   whole chain is handed to objc_declare_property_impl.  */
static void
c_parser_objc_atsynthesize_declaration (c_parser *parser)
{
  tree props = NULL_TREE;
  bool more = true;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNTHESIZE));
  c_parser_consume_token (parser);
  while (more)
    {
      tree prop, ivar;

      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  break;
	}
      prop = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      /* An optional "= ivar" names the backing instance variable.  */
      ivar = c_parser_next_token_is (parser, CPP_EQ)
	     ? c_parser_objc_eq_identifier (parser)
	     : NULL_TREE;
      props = chainon (props, build_tree_list (ivar, prop));
      more = c_parser_next_token_is (parser, CPP_COMMA);
      if (more)
	c_parser_consume_token (parser);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  /* 1 == @synthesize (2 would be @dynamic).  */
  objc_declare_property_impl (1, props);
}
/* Parse an @dynamic property-implementation declaration:
     @dynamic property[, property]... ;
   The named properties are collected into a TREE_LIST chain and handed
   to objc_declare_property_impl.  */
static void
c_parser_objc_atdynamic_declaration (c_parser *parser)
{
  tree names = NULL_TREE;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_DYNAMIC));
  c_parser_consume_token (parser);
  do
    {
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  break;
	}
      names = chainon (names,
		       build_tree_list (NULL_TREE,
					c_parser_peek_token (parser)->value));
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  while (true);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  /* 2 == @dynamic (1 would be @synthesize).  */
  objc_declare_property_impl (2, names);
}
/* APPLE LOCAL end objc new property */
/* Parse objc-class-instance-variables.
   objc-class-instance-variables:
     { objc-instance-variable-decl-list[opt] }
   objc-instance-variable-decl-list:
     objc-visibility-spec
     objc-instance-variable-decl ;
     ;
     objc-instance-variable-decl-list objc-visibility-spec
     objc-instance-variable-decl-list objc-instance-variable-decl ;
     objc-instance-variable-decl-list ;
   objc-visibility-spec:
     @private
     @protected
     @public
   objc-instance-variable-decl:
     struct-declaration
*/
static void
c_parser_objc_class_instance_variables (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE));
  c_parser_consume_token (parser);
  /* Loop until the closing brace; checking for EOF keeps an unclosed
     ivar block from looping forever.  */
  while (c_parser_next_token_is_not (parser, CPP_EOF))
    {
      tree decls;
      /* Parse any stray semicolon.  */
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  if (pedantic)
	    pedwarn ("extra semicolon in struct or union specified");
	  c_parser_consume_token (parser);
	  continue;
	}
      /* Stop if at the end of the instance variables.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	{
	  c_parser_consume_token (parser);
	  break;
	}
      /* Parse any objc-visibility-spec.  The integer codes passed to
	 objc_set_visibility are, per these call sites: @protected = 0,
	 @public = 1, @private = 2, @package = 3.  */
      if (c_parser_next_token_is_keyword (parser, RID_AT_PRIVATE))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (2);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PROTECTED))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (0);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PUBLIC))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (1);
	  continue;
	}
      /* APPLE LOCAL begin radar 4564694 */
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PACKAGE))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (3);
	  continue;
	}
      /* APPLE LOCAL end radar 4564694 */
      else if (c_parser_next_token_is (parser, CPP_PRAGMA))
	{
	  c_parser_pragma (parser, pragma_external);
	  continue;
	}
      /* Parse some comma-separated declarations.  */
      decls = c_parser_struct_declaration (parser);
      {
	/* Comma-separated instance variables are chained together in
	   reverse order; add them one by one.  */
	tree ivar = nreverse (decls);
	for (; ivar; ivar = TREE_CHAIN (ivar))
	  objc_add_instance_variable (copy_node (ivar));
      }
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
    }
}
/* Parse an objc-class-declaration (a forward declaration of classes):
   objc-class-declaration:
     @class identifier-list ;
*/
static void
c_parser_objc_class_declaration (c_parser *parser)
{
  tree names = NULL_TREE;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_CLASS));
  c_parser_consume_token (parser);
  /* Collect the class names.  Identifiers already declared as type
     names are acceptable here.  */
  for (;;)
    {
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  break;
	}
      names = chainon (names,
		       build_tree_list (NULL_TREE,
					c_parser_peek_token (parser)->value));
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_declare_class (names);
}
/* Parse an objc-alias-declaration:
   objc-alias-declaration:
     @compatibility_alias identifier identifier ;
   The first identifier is the alias, the second the existing class.  */
static void
c_parser_objc_alias_declaration (c_parser *parser)
{
  tree alias_name, class_name;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_ALIAS));
  c_parser_consume_token (parser);
  /* Alias name.  */
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      return;
    }
  alias_name = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  /* Underlying class name.  */
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      return;
    }
  class_name = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_declare_alias (alias_name, class_name);
}
/* Parse an objc-protocol-definition.
   objc-protocol-definition:
     @protocol identifier objc-protocol-refs[opt] objc-methodprotolist @end
     @protocol identifier-list ;
   "@protocol identifier ;" should be resolved as "@protocol
   identifier-list ;": objc-methodprotolist may not start with a
   semicolon in the first alternative if objc-protocol-refs are
   omitted.  */
static void
/* APPLE LOCAL radar 4947311 - protocol attributes */
c_parser_objc_protocol_definition (c_parser *parser, tree attributes)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROTOCOL));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      return;
    }
  /* Two-token lookahead distinguishes a forward declaration list
     ("@protocol A, B;" or "@protocol A;") from a full definition.  */
  if (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
      || c_parser_peek_2nd_token (parser)->type == CPP_SEMICOLON)
    {
      tree list = NULL_TREE;
      /* Any identifiers, including those declared as type names, are
	 OK here.  */
      while (true)
	{
	  tree id;
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      break;
	    }
	  id = c_parser_peek_token (parser)->value;
	  list = chainon (list, build_tree_list (NULL_TREE, id));
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      /* APPLE LOCAL radar 4947311 - protocol attributes */
      objc_declare_protocols (list, attributes);
    }
  else
    {
      tree id = c_parser_peek_token (parser)->value;
      tree proto = NULL_TREE;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      /* Recognize protocol qualifiers (in/out/...) only while parsing
	 the protocol body.  */
      objc_pq_context = 1;
      /* APPLE LOCAL radar 4947311 - protocol attributes */
      objc_start_protocol (id, proto, attributes);
      /* APPLE LOCAL C* property (Radar 4436866) (in 4.2 r) */
      c_parser_objc_interfacedecllist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_pq_context = 0;
      objc_finish_interface ();
    }
}
/* Parse an objc-method-type and consume it:
     '+' (class method)    -> PLUS_EXPR
     '-' (instance method) -> MINUS_EXPR
   The caller guarantees one of the two tokens is present; anything
   else is a front-end bug.  */
static enum tree_code
c_parser_objc_method_type (c_parser *parser)
{
  enum cpp_ttype marker = c_parser_peek_token (parser)->type;

  if (marker != CPP_PLUS && marker != CPP_MINUS)
    gcc_unreachable ();
  c_parser_consume_token (parser);
  return marker == CPP_PLUS ? PLUS_EXPR : MINUS_EXPR;
}
/* Parse an objc-method-definition.
   objc-method-definition:
     objc-method-type objc-method-decl ;[opt] compound-statement
*/
static void
c_parser_objc_method_definition (c_parser *parser)
{
  enum tree_code type = c_parser_objc_method_type (parser);
  tree decl;
  objc_set_method_type (type);
  /* Recognize protocol qualifiers (in/out/...) while parsing the
     method declaration.  */
  objc_pq_context = 1;
  decl = c_parser_objc_method_decl (parser);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      c_parser_consume_token (parser);
      if (pedantic)
	pedwarn ("extra semicolon in method definition specified");
    }
  if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      c_parser_error (parser, "expected %<{%>");
      return;
    }
  /* APPLE LOCAL begin fix -fnon-lvalue-assign (in 4.2) */
  /* in_gimple_form is set at beginning of last pass of previous function build.
     Must reset it here since we are building the parse tree here.  */
  in_gimple_form = 0;
  /* APPLE LOCAL end fix -fnon-lvalue-assign (in 4.2) */
  objc_pq_context = 0;
  /* APPLE LOCAL begin radar 3803157 - objc attribute (in 4.2 a) */
  objc_start_method_definition (decl, objc_method_attributes);
  /* Attributes were consumed by this definition; clear the global so
     they cannot leak into the next method.  */
  objc_method_attributes = NULL_TREE;
  /* APPLE LOCAL end radar 3803157 - objc attribute (in 4.2 a) */
  add_stmt (c_parser_compound_statement (parser));
  objc_finish_method_definition (current_function_decl);
}
/* APPLE LOCAL begin C* language (in 4.2 w) */
/* True iff the given TOKEN starts a methodproto: a '+' or '-' method
   marker, or the @required / @optional keywords.  */
static bool
c_token_starts_methodproto (c_token *token)
{
  if (token->type == CPP_PLUS || token->type == CPP_MINUS)
    return true;
  return token->type == CPP_KEYWORD
	 && (token->keyword == RID_AT_REQUIRED
	     || token->keyword == RID_AT_OPTIONAL);
}
/* APPLE LOCAL end C* language (in 4.2 w) */
/* Parse an objc-methodprotolist.
   objc-methodprotolist:
     empty
     objc-methodprotolist objc-methodproto
     objc-methodprotolist declaration
     objc-methodprotolist ;
   The declaration is a data definition, which may be missing
   declaration specifiers under the same rules and diagnostics as
   other data definitions outside functions, and the stray semicolon
   is diagnosed the same way as a stray semicolon outside a
   function.  */
static void
/* APPLE LOCAL C* property (Radar 4436866) (in 4.2 b) */
c_parser_objc_interfacedecllist (c_parser *parser)
{
  while (true)
    {
      /* APPLE LOCAL begin C* property (Radar 4436866) (in 4.2 b) */
      c_token *token;
      token = c_parser_peek_token (parser);
      /* @property declarations get their own parser.  */
      if (token->type == CPP_KEYWORD
	  && token->keyword == RID_AT_PROPERTY)
	{
	  c_parser_objc_property_declaration (parser);
	  continue;
	}
      /* APPLE LOCAL end C* property (Radar 4436866) (in 4.2 b) */
      /* APPLE LOCAL begin C* language (in 4.2 w) */
      /* '+', '-', @required and @optional all start a methodproto.  */
      if (c_token_starts_methodproto (token))
	{
	  c_parser_objc_methodproto (parser);
	  continue;
	}
      /* APPLE LOCAL end C* language (in 4.2 w) */
      /* The list is terminated by @end.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_SEMICOLON:
	  if (pedantic)
	    pedwarn ("ISO C does not allow extra %<;%> outside of a function");
	  c_parser_consume_token (parser);
	  break;
	/* APPLE LOCAL begin C* language (in 4.2 w) */
	/* CPP_PLUS and CPP_MINUS deleted */
	/* APPLE LOCAL end C* language (in 4.2 w) */
	case CPP_PRAGMA:
	  c_parser_pragma (parser, pragma_external);
	  break;
	case CPP_EOF:
	  return;
	default:
	  if (c_parser_next_token_is_keyword (parser, RID_AT_END))
	    return;
	  /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
	  c_parser_declaration_or_fndef (parser, false, true, false, true, NULL);
	  break;
	}
    }
}
/* Parse an objc-methodproto.
   objc-methodproto:
     objc-method-type objc-method-decl ;
   Also handles the @required / @optional protocol markers, which only
   flip the current method-optionality state and consume one token.  */
static void
c_parser_objc_methodproto (c_parser *parser)
{
  /* APPLE LOCAL C* language */
  enum tree_code type;
  tree decl;
  /* APPLE LOCAL begin C* language */
  if (c_parser_next_token_is_keyword (parser, RID_AT_REQUIRED))
    {
      /* 0 == @required.  */
      objc_set_method_opt (0);
      c_parser_consume_token (parser);
      return;
    }
  if (c_parser_next_token_is_keyword (parser, RID_AT_OPTIONAL))
    {
      /* 1 == @optional.  */
      objc_set_method_opt (1);
      c_parser_consume_token (parser);
      return;
    }
  /* APPLE LOCAL end C* language */
  /* APPLE LOCAL C* language */
  type = c_parser_objc_method_type (parser);
  objc_set_method_type (type);
  /* Remember protocol qualifiers in prototypes.  */
  objc_pq_context = 1;
  decl = c_parser_objc_method_decl (parser);
  /* Forget protocol qualifiers here.  */
  objc_pq_context = 0;
  /* APPLE LOCAL begin radar 3803157 - objc attribute (in 4.2 c) */
  objc_add_method_declaration (decl, objc_method_attributes);
  objc_method_attributes = NULL_TREE;
  /* APPLE LOCAL end radar 3803157 - objc attribute (in 4.2 c) */
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* Parse an objc-method-decl.
   objc-method-decl:
     ( objc-type-name ) objc-selector
     objc-selector
     ( objc-type-name ) objc-keyword-selector objc-optparmlist
     objc-keyword-selector objc-optparmlist
   objc-keyword-selector:
     objc-keyword-decl
     objc-keyword-selector objc-keyword-decl
   objc-keyword-decl:
     objc-selector : ( objc-type-name ) identifier
     objc-selector : identifier
     : ( objc-type-name ) identifier
     : identifier
   objc-optparmlist:
     objc-optparms objc-optellipsis
   objc-optparms:
     empty
     objc-opt-parms , parameter-declaration
   objc-optellipsis:
     empty
     , ...

   Returns the method signature tree (via objc_build_method_signature),
   or error_mark_node on a malformed declaration.  May stash parsed
   __attribute__ lists in the global objc_method_attributes.  */
static tree
c_parser_objc_method_decl (c_parser *parser)
{
  tree type = NULL_TREE;
  tree sel;
  tree parms = NULL_TREE;
  bool ellipsis = false;
  /* Optional parenthesized return type.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);
      type = c_parser_objc_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  sel = c_parser_objc_selector (parser);
  /* If there is no selector, or a colon follows, we have an
     objc-keyword-selector.  If there is a selector, and a colon does
     not follow, that selector ends the objc-method-decl.  */
  if (!sel || c_parser_next_token_is (parser, CPP_COLON))
    {
      tree tsel = sel;
      tree list = NULL_TREE;
      while (true)
	{
	  /* APPLE LOCAL radar 4157812 */
	  tree attr = NULL_TREE;
	  tree atype = NULL_TREE, id, keyworddecl;
	  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	    break;
	  /* Optional parenthesized argument type.  */
	  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
	    {
	      c_parser_consume_token (parser);
	      atype = c_parser_objc_type_name (parser);
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	    }
	  /* APPLE LOCAL begin radar 4157812 */
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    attr = c_parser_attributes (parser);
	  /* APPLE LOCAL end radar 4157812 */
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      return error_mark_node;
	    }
	  id = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  /* APPLE LOCAL radar 4157812 */
	  keyworddecl = objc_build_keyword_decl (tsel, atype, id, attr);
	  list = chainon (list, keyworddecl);
	  tsel = c_parser_objc_selector (parser);
	  /* Stop when neither a selector fragment nor a ':' follows.  */
	  if (!tsel && c_parser_next_token_is_not (parser, CPP_COLON))
	    break;
	}
      /* APPLE LOCAL begin radar 3803157 - objc attribute (in 4.2 y) */
      if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	objc_method_attributes = c_parser_attributes (parser);
      /* APPLE LOCAL end radar 3803157 - objc attribute (in 4.2 y) */
      /* Parse the optional parameter list.  Optional Objective-C
	 method parameters follow the C syntax, and may include '...'
	 to denote a variable number of arguments.  */
      parms = make_node (TREE_LIST);
      while (c_parser_next_token_is (parser, CPP_COMMA))
	{
	  struct c_parm *parm;
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	    {
	      ellipsis = true;
	      c_parser_consume_token (parser);
	      /* APPLE LOCAL begin radar 3803157 - objc attribute (in 4.2 y) */
	      if (objc_method_attributes)
		error ("method attributes must be specified at the end only");
	      if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
		objc_method_attributes = c_parser_attributes (parser);
	      /* APPLE LOCAL end radar 3803157 - objc attribute (in 4.2 y) */
	      break;
	    }
	  parm = c_parser_parameter_declaration (parser, NULL_TREE);
	  if (parm == NULL)
	    break;
	  parms = chainon (parms,
			   build_tree_list (NULL_TREE, grokparm (parm)));
	}
      sel = list;
    }
  /* APPLE LOCAL begin radar 3803157 - objc attribute (in 4.2 y) */
  else
    {
      gcc_assert (objc_method_attributes == NULL_TREE);
      if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	objc_method_attributes = c_parser_attributes (parser);
    }
  /* APPLE LOCAL end radar 3803157 - objc attribute (in 4.2 y) */
  /* APPLE LOCAL begin radar 4157812 */
  if (sel == NULL)
    {
      c_parser_error (parser, "objective-c method declaration is expected");
      return error_mark_node;
    }
  /* APPLE LOCAL end radar 4157812 */
  return objc_build_method_signature (type, sel, parms, ellipsis);
}
/* Parse an objc-type-name.
   objc-type-name:
     objc-type-qualifiers[opt] type-name
     objc-type-qualifiers[opt]
   objc-type-qualifiers:
     objc-type-qualifier
     objc-type-qualifiers objc-type-qualifier
   objc-type-qualifier: one of
     in out inout bycopy byref oneway
   Returns a TREE_LIST whose purpose is the qualifier chain (most
   recently seen first) and whose value is the type (or NULL_TREE).  */
static tree
c_parser_objc_type_name (c_parser *parser)
{
  tree quals = NULL_TREE;
  tree type = NULL_TREE;

  /* Accumulate any protocol qualifiers.  */
  for (;;)
    {
      c_token *tok = c_parser_peek_token (parser);
      bool is_qual = tok->type == CPP_KEYWORD
		     && (tok->keyword == RID_IN
			 || tok->keyword == RID_OUT
			 || tok->keyword == RID_INOUT
			 || tok->keyword == RID_BYCOPY
			 || tok->keyword == RID_BYREF
			 || tok->keyword == RID_ONEWAY);

      if (!is_qual)
	break;
      /* APPLE LOCAL radar 4301047 (in 4.2 z) */
      quals = chainon (build_tree_list (NULL_TREE, tok->value), quals);
      c_parser_consume_token (parser);
    }
  /* The type name itself is optional.  */
  if (c_parser_next_token_starts_typename (parser))
    {
      struct c_type_name *tname = c_parser_type_name (parser);
      if (tname)
	type = groktypename (tname);
    }
  return build_tree_list (quals, type);
}
/* Parse objc-protocol-refs:
   objc-protocol-refs:
     < identifier-list >
   Returns the protocol names as a TREE_LIST chain.  */
static tree
c_parser_objc_protocol_refs (c_parser *parser)
{
  tree protos = NULL_TREE;

  gcc_assert (c_parser_next_token_is (parser, CPP_LESS));
  c_parser_consume_token (parser);
  /* Any identifiers, including those declared as type names, are OK
     here.  */
  for (;;)
    {
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  break;
	}
      protos = chainon (protos,
			build_tree_list (NULL_TREE,
					 c_parser_peek_token (parser)->value));
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  c_parser_require (parser, CPP_GREATER, "expected %<>%>");
  return protos;
}
/* Parse an objc-try-catch-statement.
   objc-try-catch-statement:
     @try compound-statement objc-catch-list[opt]
     @try compound-statement objc-catch-list[opt] @finally compound-statement
   objc-catch-list:
     @catch ( parameter-declaration ) compound-statement
     objc-catch-list @catch ( parameter-declaration ) compound-statement
*/
static void
c_parser_objc_try_catch_statement (c_parser *parser)
{
  location_t loc;
  tree stmt;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_TRY));
  c_parser_consume_token (parser);
  loc = c_parser_peek_token (parser)->location;
  stmt = c_parser_compound_statement (parser);
  objc_begin_try_stmt (loc, stmt);
  /* Parse each @catch clause in turn.  */
  while (c_parser_next_token_is_keyword (parser, RID_AT_CATCH))
    {
      struct c_parm *parm;
      c_parser_consume_token (parser);
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	break;
      /* APPLE LOCAL begin radar 2848255 */
      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  /* @catch (...) -- a catch-all clause, signalled to the front
	     end by a NULL_TREE parameter.  */
	  c_parser_consume_token (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
	  objc_begin_catch_clause (NULL_TREE);
	}
      else
	{
	  parm = c_parser_parameter_declaration (parser, NULL_TREE);
	  if (parm == NULL)
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      break;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
	  objc_begin_catch_clause (grokparm (parm));
	}
      /* APPLE LOCAL end radar 2848255 */
      if (c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
	c_parser_compound_statement_nostart (parser);
      objc_finish_catch_clause ();
    }
  if (c_parser_next_token_is_keyword (parser, RID_AT_FINALLY))
    {
      location_t finloc;
      tree finstmt;
      c_parser_consume_token (parser);
      finloc = c_parser_peek_token (parser)->location;
      finstmt = c_parser_compound_statement (parser);
      objc_build_finally_clause (finloc, finstmt);
    }
  objc_finish_try_stmt ();
}
/* Parse an objc-synchronized-statement:
   objc-synchronized-statement:
     @synchronized ( expression ) compound-statement
*/
static void
c_parser_objc_synchronized_statement (c_parser *parser)
{
  location_t start_loc;
  tree guard, body;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNCHRONIZED));
  c_parser_consume_token (parser);
  start_loc = c_parser_peek_token (parser)->location;
  /* The lock expression; error_mark_node if the '(' is missing.  */
  guard = error_mark_node;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      guard = c_parser_expression (parser).value;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  body = c_parser_compound_statement (parser);
  objc_build_synchronized (start_loc, guard, body);
}
/* Parse an objc-selector; return NULL_TREE -- without emitting an
   error -- if the next token is not an objc-selector.
   objc-selector:
     identifier
     one of
       enum struct union if else while do for switch case default
       break continue return goto asm sizeof typeof __alignof
       unsigned long const short volatile signed restrict _Complex
       in out inout bycopy byref oneway int char float double void _Bool
   ??? Why this selection of keywords but not, for example, storage
   class specifiers?  */
static tree
c_parser_objc_selector (c_parser *parser)
{
  c_token *tok = c_parser_peek_token (parser);
  tree value = tok->value;
  bool is_selector;

  if (tok->type == CPP_NAME)
    is_selector = true;
  else if (tok->type != CPP_KEYWORD)
    return NULL_TREE;
  else
    switch (tok->keyword)
      {
      case RID_ENUM: case RID_STRUCT: case RID_UNION:
      case RID_IF: case RID_ELSE: case RID_WHILE: case RID_DO:
      case RID_FOR: case RID_SWITCH: case RID_CASE: case RID_DEFAULT:
      case RID_BREAK: case RID_CONTINUE: case RID_RETURN: case RID_GOTO:
      case RID_ASM: case RID_SIZEOF: case RID_TYPEOF: case RID_ALIGNOF:
      case RID_UNSIGNED: case RID_LONG: case RID_CONST: case RID_SHORT:
      case RID_VOLATILE: case RID_SIGNED: case RID_RESTRICT: case RID_COMPLEX:
      case RID_IN: case RID_OUT: case RID_INOUT: case RID_BYCOPY:
      case RID_BYREF: case RID_ONEWAY:
      case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE:
      case RID_VOID: case RID_BOOL:
	is_selector = true;
	break;
      default:
	is_selector = false;
	break;
      }
  if (!is_selector)
    return NULL_TREE;
  c_parser_consume_token (parser);
  return value;
}
/* Parse an objc-selector-arg:
   objc-selector-arg:
     objc-selector
     objc-keywordname-list
   objc-keywordname-list:
     objc-keywordname
     objc-keywordname-list objc-keywordname
   objc-keywordname:
     objc-selector :
     :
   A lone selector is returned directly; a keyword-name list is
   returned as a TREE_LIST chain of selector fragments.  */
static tree
c_parser_objc_selector_arg (c_parser *parser)
{
  tree part = c_parser_objc_selector (parser);
  tree keywords = NULL_TREE;

  if (part != NULL_TREE && c_parser_next_token_is_not (parser, CPP_COLON))
    return part;
  do
    {
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	break;
      keywords = chainon (keywords, build_tree_list (part, NULL_TREE));
      part = c_parser_objc_selector (parser);
    }
  while (part || c_parser_next_token_is (parser, CPP_COLON));
  return keywords;
}
/* Parse an objc-receiver:
   objc-receiver:
     expression
     class-name
     type-name
   A bare class or typedef name denotes a class reference; anything
   else is parsed as a general expression.  */
static tree
c_parser_objc_receiver (c_parser *parser)
{
  c_token *tok = c_parser_peek_token (parser);

  if (tok->type == CPP_NAME
      && (tok->id_kind == C_ID_TYPENAME || tok->id_kind == C_ID_CLASSNAME))
    {
      tree name = tok->value;
      c_parser_consume_token (parser);
      return objc_get_class_reference (name);
    }
  return c_parser_expression (parser).value;
}
/* Parse objc-message-args:
   objc-message-args:
     objc-selector
     objc-keywordarg-list
   objc-keywordarg-list:
     objc-keywordarg
     objc-keywordarg-list objc-keywordarg
   objc-keywordarg:
     objc-selector : objc-keywordexpr
     : objc-keywordexpr
   A lone selector (unary message) is returned directly; keyword
   arguments are returned as a TREE_LIST of (selector, expr) pairs.  */
static tree
c_parser_objc_message_args (c_parser *parser)
{
  tree part = c_parser_objc_selector (parser);
  tree args = NULL_TREE;

  if (part != NULL_TREE && c_parser_next_token_is_not (parser, CPP_COLON))
    return part;
  do
    {
      tree expr;

      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	break;
      expr = c_parser_objc_keywordexpr (parser);
      args = chainon (args, build_tree_list (part, expr));
      part = c_parser_objc_selector (parser);
    }
  while (part || c_parser_next_token_is (parser, CPP_COLON));
  return args;
}
/* Parse an objc-keywordexpr:
   objc-keywordexpr:
     nonempty-expr-list
   A single expression is unwrapped from its list node (removing a
   level of indirection); an actual comma-separated list is returned
   whole and collapsed later.  */
static tree
c_parser_objc_keywordexpr (c_parser *parser)
{
  tree exprs = c_parser_expr_list (parser, true);

  return TREE_CHAIN (exprs) == NULL_TREE ? TREE_VALUE (exprs) : exprs;
}
/* Handle pragmas.  Some OpenMP pragmas are associated with, and therefore
   should be considered, statements.  CONTEXT says where in the grammar
   the pragma occurs (external scope, compound statement, or statement
   position) and decides whether such statement-like pragmas are allowed.
   Returns true if we actually parsed such a pragma (i.e. an OpenMP
   construct); false otherwise.  */
static bool
c_parser_pragma (c_parser *parser, enum pragma_context context)
{
  unsigned int id;
  id = c_parser_peek_token (parser)->pragma_kind;
  gcc_assert (id != PRAGMA_NONE);
  switch (id)
    {
    case PRAGMA_OMP_BARRIER:
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp barrier%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_barrier (parser);
      return false;
    case PRAGMA_OMP_FLUSH:
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp flush%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_flush (parser);
      return false;
    case PRAGMA_OMP_THREADPRIVATE:
      c_parser_omp_threadprivate (parser);
      return false;
    case PRAGMA_OMP_SECTION:
      error ("%<#pragma omp section%> may only be used in "
	     "%<#pragma omp sections%> construct");
      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
      return false;
    case PRAGMA_GCC_PCH_PREPROCESS:
      c_parser_error (parser, "%<#pragma GCC pch_preprocess%> must be first");
      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
      return false;
    default:
      /* Remaining ids below PRAGMA_FIRST_EXTERNAL are OpenMP constructs,
	 which are only valid inside functions; anything at or above it
	 is dispatched to a registered pragma handler below.  */
      if (id < PRAGMA_FIRST_EXTERNAL)
	{
	  if (context == pragma_external)
	    {
	    bad_stmt:
	      c_parser_error (parser, "expected declaration specifiers");
	      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
	      return false;
	    }
	  c_parser_omp_construct (parser);
	  return true;
	}
      break;
    }
  c_parser_consume_pragma (parser);
  c_invoke_pragma_handler (id);
  /* Skip to EOL, but suppress any error message.  Those will have been
     generated by the handler routine through calling error, as opposed
     to calling c_parser_error.  */
  parser->error = true;
  c_parser_skip_to_pragma_eol (parser);
  return false;
}
/* The interface the pragma parsers have to the lexer: return the next
   token's type, storing its value in *VALUE.  CPP_PRAGMA_EOL and
   CPP_EOF are both reported as CPP_EOF and not consumed; keywords are
   reported as plain CPP_NAME.  */
enum cpp_ttype
pragma_lex (tree *value)
{
  c_token *tok = c_parser_peek_token (the_parser);
  enum cpp_ttype type = tok->type;

  *value = tok->value;
  if (type == CPP_PRAGMA_EOL || type == CPP_EOF)
    return CPP_EOF;
  c_parser_consume_token (the_parser);
  return type == CPP_KEYWORD ? CPP_NAME : type;
}
/* Parse '#pragma GCC pch_preprocess "file"' and hand the named PCH
   file to the PCH machinery.  Nothing is loaded if the string literal
   is missing (an error is emitted instead).  */
static void
c_parser_pragma_pch_preprocess (c_parser *parser)
{
  tree fname = NULL;

  c_parser_consume_pragma (parser);
  if (!c_parser_next_token_is (parser, CPP_STRING))
    c_parser_error (parser, "expected string literal");
  else
    {
      fname = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  c_parser_skip_to_pragma_eol (parser);
  if (fname)
    c_common_pch_pragma (parse_in, TREE_STRING_POINTER (fname));
}
/* OpenMP 2.5 parsing routines.  */

/* Returns name of the next clause.
   If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and
   the token is not consumed.  Otherwise appropriate pragma_omp_clause is
   returned and the token is consumed.  */
static pragma_omp_clause
c_parser_omp_clause_name (c_parser *parser)
{
  /* Clause keywords that are ordinary identifiers ('if' and 'default'
     are C keywords and handled separately below).  */
  static const struct
  {
    const char *name;
    pragma_omp_clause clause;
  } omp_clauses[] =
  {
    { "copyin",       PRAGMA_OMP_CLAUSE_COPYIN },
    { "copyprivate",  PRAGMA_OMP_CLAUSE_COPYPRIVATE },
    { "firstprivate", PRAGMA_OMP_CLAUSE_FIRSTPRIVATE },
    { "lastprivate",  PRAGMA_OMP_CLAUSE_LASTPRIVATE },
    { "nowait",       PRAGMA_OMP_CLAUSE_NOWAIT },
    { "num_threads",  PRAGMA_OMP_CLAUSE_NUM_THREADS },
    { "ordered",      PRAGMA_OMP_CLAUSE_ORDERED },
    { "private",      PRAGMA_OMP_CLAUSE_PRIVATE },
    { "reduction",    PRAGMA_OMP_CLAUSE_REDUCTION },
    { "schedule",     PRAGMA_OMP_CLAUSE_SCHEDULE },
    { "shared",       PRAGMA_OMP_CLAUSE_SHARED }
  };
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;

  if (c_parser_next_token_is_keyword (parser, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      size_t i;

      for (i = 0; i < sizeof (omp_clauses) / sizeof (omp_clauses[0]); i++)
	if (!strcmp (omp_clauses[i].name, p))
	  {
	    result = omp_clauses[i].clause;
	    break;
	  }
    }
  if (result != PRAGMA_OMP_CLAUSE_NONE)
    c_parser_consume_token (parser);
  return result;
}
/* Validate that a clause of the given type does not already exist in
   CLAUSES; emit a "too many %qs clauses" error (using NAME) on the
   first duplicate found.  */
static void
check_no_duplicate_clause (tree clauses, enum tree_code code, const char *name)
{
  tree c;

  for (c = clauses; c != NULL_TREE; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == code)
      {
	error ("too many %qs clauses", name);
	return;
      }
}
/* OpenMP 2.5:
   variable-list:
     identifier
     variable-list , identifier
   If KIND is nonzero, create the appropriate node and install the decl
   in OMP_CLAUSE_DECL and add the node to the head of the list.
   If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE;
   return the list created.  */
static tree
c_parser_omp_variable_list (c_parser *parser, enum omp_clause_code kind,
                            tree list)
{
  /* Diagnose an empty or malformed list up front; the loop below then
     simply parses zero items.  */
  if (c_parser_next_token_is_not (parser, CPP_NAME)
      || c_parser_peek_token (parser)->id_kind != C_ID_ID)
    c_parser_error (parser, "expected identifier");
  while (c_parser_next_token_is (parser, CPP_NAME)
	 && c_parser_peek_token (parser)->id_kind == C_ID_ID)
    {
      tree t = lookup_name (c_parser_peek_token (parser)->value);
      if (t == NULL_TREE)
	undeclared_variable (c_parser_peek_token (parser)->value,
			     c_parser_peek_token (parser)->location);
      else if (t == error_mark_node)
	;
      else if (kind != 0)
	{
	  /* Clause mode: prepend an OMP_CLAUSE node of kind KIND.  */
	  tree u = build_omp_clause (kind);
	  OMP_CLAUSE_DECL (u) = t;
	  OMP_CLAUSE_CHAIN (u) = list;
	  list = u;
	}
      else
	/* Raw-list mode (flush, threadprivate): plain TREE_LIST.  */
	list = tree_cons (t, NULL_TREE, list);
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  return list;
}
/* Parse a parenthesized OpenMP variable list: "( variable-list )" —
   the common shape of most OMP clauses.  KIND and LIST are forwarded
   to c_parser_omp_variable_list; on a missing open paren the list is
   returned unchanged.  */
static tree
c_parser_omp_var_list_parens (c_parser *parser, enum tree_code kind, tree list)
{
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;
  list = c_parser_omp_variable_list (parser, kind, list);
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return list;
}
/* OpenMP 2.5:
   copyin ( variable-list )
   Prepend one OMP_CLAUSE_COPYIN node per listed variable to LIST.  */
static tree
c_parser_omp_clause_copyin (c_parser *parser, tree list)
{
  tree result
    = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYIN, list);
  return result;
}
/* OpenMP 2.5:
   copyprivate ( variable-list )
   Prepend one OMP_CLAUSE_COPYPRIVATE node per listed variable to LIST.  */
static tree
c_parser_omp_clause_copyprivate (c_parser *parser, tree list)
{
  tree result
    = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYPRIVATE, list);
  return result;
}
/* OpenMP 2.5:
   default ( shared | none )
   Returns LIST unchanged on a parse error, otherwise a new
   OMP_CLAUSE_DEFAULT node chained onto LIST.  */
static tree
c_parser_omp_clause_default (c_parser *parser, tree list)
{
  enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
  tree c;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      switch (p[0])
	{
	case 'n':
	  if (strcmp ("none", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_NONE;
	  break;
	case 's':
	  if (strcmp ("shared", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_SHARED;
	  break;
	default:
	  goto invalid_kind;
	}
      c_parser_consume_token (parser);
    }
  else
    {
      /* NOTE: the gotos above deliberately jump into this else-arm so
	 that all invalid spellings share one diagnostic.  */
    invalid_kind:
      c_parser_error (parser, "expected %<none%> or %<shared%>");
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  /* KIND is still UNSPECIFIED exactly when we diagnosed above.  */
  if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    return list;
  check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default");
  c = build_omp_clause (OMP_CLAUSE_DEFAULT);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_DEFAULT_KIND (c) = kind;
  return c;
}
/* OpenMP 2.5:
   firstprivate ( variable-list )
   Prepend one OMP_CLAUSE_FIRSTPRIVATE node per listed variable to LIST.  */
static tree
c_parser_omp_clause_firstprivate (c_parser *parser, tree list)
{
  tree result
    = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FIRSTPRIVATE, list);
  return result;
}
/* OpenMP 2.5:
   if ( expression )
   Returns a new OMP_CLAUSE_IF node chained onto LIST, or LIST
   unchanged if the open paren is missing.  */
static tree
c_parser_omp_clause_if (c_parser *parser, tree list)
{
  tree cond, clause;
  if (!c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_error (parser, "expected %<(%>");
      return list;
    }
  cond = c_parser_paren_condition (parser);
  check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if");
  clause = build_omp_clause (OMP_CLAUSE_IF);
  OMP_CLAUSE_IF_EXPR (clause) = cond;
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
   lastprivate ( variable-list )
   Prepend one OMP_CLAUSE_LASTPRIVATE node per listed variable to LIST.  */
static tree
c_parser_omp_clause_lastprivate (c_parser *parser, tree list)
{
  tree result
    = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_LASTPRIVATE, list);
  return result;
}
/* OpenMP 2.5:
   nowait
   The clause takes no arguments, so the parser is unused; we only
   check for duplicates and prepend a bare OMP_CLAUSE_NOWAIT node.  */
static tree
c_parser_omp_clause_nowait (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  tree clause;
  check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait");
  clause = build_omp_clause (OMP_CLAUSE_NOWAIT);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
   num_threads ( expression )
   Returns a new OMP_CLAUSE_NUM_THREADS node chained onto LIST, or LIST
   unchanged on error.  */
static tree
c_parser_omp_clause_num_threads (c_parser *parser, tree list)
{
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      tree c, t = c_parser_expression (parser).value;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}
      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2 (LE_EXPR, boolean_type_node, t,
		       build_int_cst (TREE_TYPE (t), 0));
      if (c == boolean_true_node)
	{
	  /* Provably non-positive: warn and substitute 1 so later stages
	     see a valid thread count.  */
	  warning (0, "%<num_threads%> value must be positive");
	  t = integer_one_node;
	}
      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads");
      c = build_omp_clause (OMP_CLAUSE_NUM_THREADS);
      OMP_CLAUSE_NUM_THREADS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }
  return list;
}
/* OpenMP 2.5:
   ordered
   The clause takes no arguments, so the parser is unused; we only
   check for duplicates and prepend a bare OMP_CLAUSE_ORDERED node.  */
static tree
c_parser_omp_clause_ordered (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  tree clause;
  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered");
  clause = build_omp_clause (OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
   private ( variable-list )
   Prepend one OMP_CLAUSE_PRIVATE node per listed variable to LIST.  */
static tree
c_parser_omp_clause_private (c_parser *parser, tree list)
{
  tree result
    = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_PRIVATE, list);
  return result;
}
/* OpenMP 2.5:
   reduction ( reduction-operator : variable-list )
   reduction-operator:
     One of: + * - & ^ | && ||
   Each listed variable becomes its own OMP_CLAUSE_REDUCTION node with
   OMP_CLAUSE_REDUCTION_CODE set to the parsed operator.  */
static tree
c_parser_omp_clause_reduction (c_parser *parser, tree list)
{
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      enum tree_code code;
      /* Map the operator token to the tree code used by the reduction.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_PLUS:
	  code = PLUS_EXPR;
	  break;
	case CPP_MULT:
	  code = MULT_EXPR;
	  break;
	case CPP_MINUS:
	  code = MINUS_EXPR;
	  break;
	case CPP_AND:
	  code = BIT_AND_EXPR;
	  break;
	case CPP_XOR:
	  code = BIT_XOR_EXPR;
	  break;
	case CPP_OR:
	  code = BIT_IOR_EXPR;
	  break;
	case CPP_AND_AND:
	  code = TRUTH_ANDIF_EXPR;
	  break;
	case CPP_OR_OR:
	  code = TRUTH_ORIF_EXPR;
	  break;
	default:
	  c_parser_error (parser,
			  "expected %<+%>, %<*%>, %<-%>, %<&%>, "
			  "%<^%>, %<|%>, %<&&%>, or %<||%>");
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
	  return list;
	}
      c_parser_consume_token (parser);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	{
	  tree nl, c;
	  nl = c_parser_omp_variable_list (parser, OMP_CLAUSE_REDUCTION, list);
	  /* The new clauses were prepended to LIST, so the nodes from NL
	     up to (but excluding) LIST are exactly the ones just created;
	     stamp the reduction operator on each.  */
	  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
	    OMP_CLAUSE_REDUCTION_CODE (c) = code;
	  list = nl;
	}
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  return list;
}
/* OpenMP 2.5:
   schedule ( schedule-kind )
   schedule ( schedule-kind , expression )
   schedule-kind:
     static | dynamic | guided | runtime
   Returns a new OMP_CLAUSE_SCHEDULE node chained onto LIST, or LIST
   unchanged on an invalid schedule kind.  */
static tree
c_parser_omp_clause_schedule (c_parser *parser, tree list)
{
  tree c, t;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;
  c = build_omp_clause (OMP_CLAUSE_SCHEDULE);
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      tree kind = c_parser_peek_token (parser)->value;
      const char *p = IDENTIFIER_POINTER (kind);
      switch (p[0])
	{
	case 'd':
	  if (strcmp ("dynamic", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
	  break;
	case 'g':
	  if (strcmp ("guided", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
	  break;
	case 'r':
	  if (strcmp ("runtime", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
	  break;
	default:
	  goto invalid_kind;
	}
    }
  /* "static" is a C keyword, so it cannot appear as a CPP_NAME.  */
  else if (c_parser_next_token_is_keyword (parser, RID_STATIC))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
  else
    goto invalid_kind;
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COMMA))
    {
      /* Optional chunk-size expression follows the comma.  */
      c_parser_consume_token (parser);
      t = c_parser_expr_no_commas (parser, NULL).value;
      if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
	error ("schedule %<runtime%> does not take "
	       "a %<chunk_size%> parameter");
      else if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE)
	OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;
      else
	c_parser_error (parser, "expected integer expression");
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  else
    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
			       "expected %<,%> or %<)%>");
  check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule");
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
 invalid_kind:
  /* The freshly built clause C is simply dropped here; LIST is
     returned unchanged.  */
  c_parser_error (parser, "invalid schedule kind");
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
  return list;
}
/* OpenMP 2.5:
   shared ( variable-list )
   Prepend one OMP_CLAUSE_SHARED node per listed variable to LIST.  */
static tree
c_parser_omp_clause_shared (c_parser *parser, tree list)
{
  tree result
    = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_SHARED, list);
  return result;
}
/* Parse all OpenMP clauses up to the pragma end-of-line.  The set of
   clauses allowed by the directive is a bitmask in MASK (indexed by
   pragma_omp_clause); WHERE names the directive for diagnostics.
   Returns the finished clause list.  */
static void
/* NOTE(review): the original comment mentioned a "*pdefault" out
   parameter that this function does not have; removed as stale.  */
static tree
c_parser_omp_all_clauses (c_parser *parser, unsigned int mask,
			  const char *where)
{
  tree clauses = NULL;
  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      const pragma_omp_clause c_kind = c_parser_omp_clause_name (parser);
      const char *c_name;
      /* Remember the list head so an invalid clause can be unlinked.  */
      tree prev = clauses;
      switch (c_kind)
	{
	case PRAGMA_OMP_CLAUSE_COPYIN:
	  clauses = c_parser_omp_clause_copyin (parser, clauses);
	  c_name = "copyin";
	  break;
	case PRAGMA_OMP_CLAUSE_COPYPRIVATE:
	  clauses = c_parser_omp_clause_copyprivate (parser, clauses);
	  c_name = "copyprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_DEFAULT:
	  clauses = c_parser_omp_clause_default (parser, clauses);
	  c_name = "default";
	  break;
	case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
	  clauses = c_parser_omp_clause_firstprivate (parser, clauses);
	  c_name = "firstprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_IF:
	  clauses = c_parser_omp_clause_if (parser, clauses);
	  c_name = "if";
	  break;
	case PRAGMA_OMP_CLAUSE_LASTPRIVATE:
	  clauses = c_parser_omp_clause_lastprivate (parser, clauses);
	  c_name = "lastprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_NOWAIT:
	  clauses = c_parser_omp_clause_nowait (parser, clauses);
	  c_name = "nowait";
	  break;
	case PRAGMA_OMP_CLAUSE_NUM_THREADS:
	  clauses = c_parser_omp_clause_num_threads (parser, clauses);
	  c_name = "num_threads";
	  break;
	case PRAGMA_OMP_CLAUSE_ORDERED:
	  clauses = c_parser_omp_clause_ordered (parser, clauses);
	  c_name = "ordered";
	  break;
	case PRAGMA_OMP_CLAUSE_PRIVATE:
	  clauses = c_parser_omp_clause_private (parser, clauses);
	  c_name = "private";
	  break;
	case PRAGMA_OMP_CLAUSE_REDUCTION:
	  clauses = c_parser_omp_clause_reduction (parser, clauses);
	  c_name = "reduction";
	  break;
	case PRAGMA_OMP_CLAUSE_SCHEDULE:
	  clauses = c_parser_omp_clause_schedule (parser, clauses);
	  c_name = "schedule";
	  break;
	case PRAGMA_OMP_CLAUSE_SHARED:
	  clauses = c_parser_omp_clause_shared (parser, clauses);
	  c_name = "shared";
	  break;
	default:
	  c_parser_error (parser, "expected %<#pragma omp%> clause");
	  goto saw_error;
	}
      if (((mask >> c_kind) & 1) == 0 && !parser->error)
	{
	  /* Remove the invalid clause(s) from the list to avoid
	     confusing the rest of the compiler.  */
	  clauses = prev;
	  error ("%qs is not valid for %qs", c_name, where);
	}
    }
 saw_error:
  c_parser_skip_to_pragma_eol (parser);
  return c_finish_omp_clauses (clauses);
}
/* OpenMP 2.5:
   structured-block:
     statement
   Parse one statement and return it as a detached statement list,
   undoing the add_stmt that c_parser_statement performs, so the caller
   can attach the block to an outer OMP node itself.  */
static tree
c_parser_omp_structured_block (c_parser *parser)
{
  tree block = push_stmt_list ();
  c_parser_statement (parser);
  return pop_stmt_list (block);
}
/* OpenMP 2.5:
   # pragma omp atomic new-line
     expression-stmt
   expression-stmt:
     x binop= expr | x++ | ++x | x-- | --x
   binop:
     +, *, -, /, &, ^, |, <<, >>
  where x is an lvalue expression with scalar type.  */
static void
c_parser_omp_atomic (c_parser *parser)
{
  tree lhs, rhs;
  tree stmt;
  enum tree_code code;
  c_parser_skip_to_pragma_eol (parser);
  lhs = c_parser_unary_expression (parser).value;
  /* ++/-- forms are folded into the equivalent "x += 1" / "x -= 1".  */
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
    saw_error:
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    case PREINCREMENT_EXPR:
    case POSTINCREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      code = PLUS_EXPR;
      rhs = integer_one_node;
      break;
    case PREDECREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      code = MINUS_EXPR;
      rhs = integer_one_node;
      break;
    default:
      /* Otherwise a compound-assignment operator must follow the lhs.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT_EQ:
	  code = MULT_EXPR;
	  break;
	case CPP_DIV_EQ:
	  code = TRUNC_DIV_EXPR;
	  break;
	case CPP_PLUS_EQ:
	  code = PLUS_EXPR;
	  break;
	case CPP_MINUS_EQ:
	  code = MINUS_EXPR;
	  break;
	case CPP_LSHIFT_EQ:
	  code = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT_EQ:
	  code = RSHIFT_EXPR;
	  break;
	case CPP_AND_EQ:
	  code = BIT_AND_EXPR;
	  break;
	case CPP_OR_EQ:
	  code = BIT_IOR_EXPR;
	  break;
	case CPP_XOR_EQ:
	  code = BIT_XOR_EXPR;
	  break;
	default:
	  c_parser_error (parser,
			  "invalid operator for %<#pragma omp atomic%>");
	  goto saw_error;
	}
      c_parser_consume_token (parser);
      rhs = c_parser_expression (parser).value;
      break;
    }
  stmt = c_finish_omp_atomic (code, lhs, rhs);
  if (stmt != error_mark_node)
    add_stmt (stmt);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* OpenMP 2.5:
   # pragma omp barrier new-line
   Consume the pragma and emit a barrier; no body follows.  */
static void
c_parser_omp_barrier (c_parser *parser)
{
  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);
  c_finish_omp_barrier ();
}
/* OpenMP 2.5:
   # pragma omp critical [(name)] new-line
     structured-block
   NAME stays NULL for the unnamed critical region.  */
static tree
c_parser_omp_critical (c_parser *parser)
{
  tree stmt, name = NULL;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  name = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>");
	}
      else
	c_parser_error (parser, "expected identifier");
    }
  else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    c_parser_error (parser, "expected %<(%> or end of line");
  c_parser_skip_to_pragma_eol (parser);
  stmt = c_parser_omp_structured_block (parser);
  return c_finish_omp_critical (stmt, name);
}
/* OpenMP 2.5:
   # pragma omp flush flush-vars[opt] new-line
   flush-vars:
     ( variable-list )
   The variable list is parsed for diagnostics only (kind 0 builds a
   TREE_LIST whose result is discarded); the flush itself is emitted
   unconditionally.  */
static void
c_parser_omp_flush (c_parser *parser)
{
  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    c_parser_omp_var_list_parens (parser, 0, NULL);
  else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    c_parser_error (parser, "expected %<(%> or end of line");
  c_parser_skip_to_pragma_eol (parser);
  c_finish_omp_flush ();
}
/* Parse the restricted form of the for statement allowed by OpenMP.
   The real trick here is to determine the loop control variable early
   so that we can push a new decl if necessary to make it private.
   Returns the OMP_FOR tree, or NULL on error.  */
static tree
c_parser_omp_for_loop (c_parser *parser)
{
  tree decl, cond, incr, save_break, save_cont, body, init;
  location_t loc;
  if (!c_parser_next_token_is_keyword (parser, RID_FOR))
    {
      c_parser_error (parser, "for statement expected");
      return NULL;
    }
  loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return NULL;
  /* Parse the initialization declaration or expression.  */
  if (c_parser_next_token_starts_declspecs (parser))
    {
      /* APPLE LOCAL radar 4708210 (for_objc_collection in 4.2) */
      c_parser_declaration_or_fndef (parser, true, true, true, true, NULL);
      decl = check_for_loop_decls ();
      if (decl == NULL)
	goto error_init;
      init = decl;
    }
  else if (c_parser_next_token_is (parser, CPP_NAME)
	   && c_parser_peek_2nd_token (parser)->type == CPP_EQ)
    {
      /* "id = expr" form: the control variable was declared earlier.  */
      decl = c_parser_postfix_expression (parser).value;
      c_parser_require (parser, CPP_EQ, "expected %<=%>");
      init = c_parser_expr_no_commas (parser, NULL).value;
      init = build_modify_expr (decl, NOP_EXPR, init);
      init = c_process_expr_stmt (init);
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
    }
  else
    goto error_init;
  /* Parse the loop condition.  */
  cond = NULL_TREE;
  if (c_parser_next_token_is_not (parser, CPP_SEMICOLON))
    {
      cond = c_parser_expression_conv (parser).value;
      cond = c_objc_common_truthvalue_conversion (cond);
      if (EXPR_P (cond))
	SET_EXPR_LOCATION (cond, input_location);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  /* Parse the increment expression.  */
  incr = NULL_TREE;
  if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN))
    incr = c_process_expr_stmt (c_parser_expression (parser).value);
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
 parse_body:
  /* break/continue are redirected while inside the OMP loop body;
     c_break_label is set non-NULL so "break" is rejected/handled by
     the statement parser, and any "continue" label is emitted below.  */
  save_break = c_break_label;
  c_break_label = size_one_node;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = push_stmt_list ();
  add_stmt (c_parser_c99_block_statement (parser));
  if (c_cont_label)
    add_stmt (build1 (LABEL_EXPR, void_type_node, c_cont_label));
  body = pop_stmt_list (body);
  c_break_label = save_break;
  c_cont_label = save_cont;
  /* Only bother calling c_finish_omp_for if we haven't already generated
     an error from the initialization parsing.  */
  if (decl != NULL && decl != error_mark_node && init != error_mark_node)
    return c_finish_omp_for (loc, decl, init, cond, incr, body, NULL);
  return NULL;
 error_init:
  /* Bad init: still parse the body (for cascading-error quality) but
     return NULL afterwards.  */
  c_parser_error (parser, "expected iteration declaration or initialization");
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  decl = init = cond = incr = NULL_TREE;
  goto parse_body;
}
/* OpenMP 2.5:
   #pragma omp for for-clause[optseq] new-line
     for-loop
*/
#define OMP_FOR_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_ORDERED)		\
	| (1u << PRAGMA_OMP_CLAUSE_SCHEDULE)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))
/* Parse "#pragma omp for": clauses, then the restricted for-loop,
   wrapped in a compound statement so loop-private decls scope
   correctly.  Returns the OMP_FOR tree or NULL on error.  */
static tree
c_parser_omp_for (c_parser *parser)
{
  tree block, clauses, ret;
  clauses = c_parser_omp_all_clauses (parser, OMP_FOR_CLAUSE_MASK,
				      "#pragma omp for");
  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_for_loop (parser);
  if (ret)
    OMP_FOR_CLAUSES (ret) = clauses;
  block = c_end_compound_stmt (block, true);
  add_stmt (block);
  return ret;
}
/* OpenMP 2.5:
   # pragma omp master new-line
     structured-block
*/
static tree
c_parser_omp_master (c_parser *parser)
{
  tree body;
  c_parser_skip_to_pragma_eol (parser);
  body = c_parser_omp_structured_block (parser);
  return c_finish_omp_master (body);
}
/* OpenMP 2.5:
   # pragma omp ordered new-line
     structured-block
*/
static tree
c_parser_omp_ordered (c_parser *parser)
{
  tree body;
  c_parser_skip_to_pragma_eol (parser);
  body = c_parser_omp_structured_block (parser);
  return c_finish_omp_ordered (body);
}
/* OpenMP 2.5:
   section-scope:
     { section-sequence }
   section-sequence:
     section-directive[opt] structured-block
     section-sequence section-directive structured-block
   Builds and adds an OMP_SECTIONS node whose body is one OMP_SECTION
   per section.  Returns the added node, or NULL_TREE when the opening
   brace is missing.  */
static tree
c_parser_omp_sections_scope (c_parser *parser)
{
  tree stmt, substmt;
  bool error_suppress = false;
  location_t loc;
  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    {
      /* Avoid skipping until the end of the block.  */
      parser->error = false;
      return NULL_TREE;
    }
  stmt = push_stmt_list ();
  loc = c_parser_peek_token (parser)->location;
  /* The first section may omit its "#pragma omp section"; collect
     statements up to the first explicit section directive (or the
     closing brace) into an implicit first OMP_SECTION.  */
  if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_SECTION)
    {
      substmt = push_stmt_list ();
      while (1)
	{
	  c_parser_statement (parser);
	  if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION)
	    break;
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    break;
	  if (c_parser_next_token_is (parser, CPP_EOF))
	    break;
	}
      substmt = pop_stmt_list (substmt);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      SET_EXPR_LOCATION (substmt, loc);
      add_stmt (substmt);
    }
  /* Remaining sections must each start with the section directive;
     error_suppress limits "expected #pragma omp section" to one
     diagnostic per run of offending blocks.  */
  while (1)
    {
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	break;
      if (c_parser_next_token_is (parser, CPP_EOF))
	break;
      loc = c_parser_peek_token (parser)->location;
      if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION)
	{
	  c_parser_consume_pragma (parser);
	  c_parser_skip_to_pragma_eol (parser);
	  error_suppress = false;
	}
      else if (!error_suppress)
	{
	  error ("expected %<#pragma omp section%> or %<}%>");
	  error_suppress = true;
	}
      substmt = c_parser_omp_structured_block (parser);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      SET_EXPR_LOCATION (substmt, loc);
      add_stmt (substmt);
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE,
			     "expected %<#pragma omp section%> or %<}%>");
  substmt = pop_stmt_list (stmt);
  stmt = make_node (OMP_SECTIONS);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SECTIONS_BODY (stmt) = substmt;
  return add_stmt (stmt);
}
/* OpenMP 2.5:
   # pragma omp sections sections-clause[optseq] newline
     sections-scope
*/
#define OMP_SECTIONS_CLAUSE_MASK			\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))
/* Parse "#pragma omp sections": clauses, then the brace-enclosed
   section scope, wrapped in a compound statement.  Returns the
   OMP_SECTIONS tree or NULL on error.  */
static tree
c_parser_omp_sections (c_parser *parser)
{
  tree block, clauses, ret;
  clauses = c_parser_omp_all_clauses (parser, OMP_SECTIONS_CLAUSE_MASK,
				      "#pragma omp sections");
  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_sections_scope (parser);
  if (ret)
    OMP_SECTIONS_CLAUSES (ret) = clauses;
  block = c_end_compound_stmt (block, true);
  add_stmt (block);
  return ret;
}
/* OpenMP 2.5:
   # pragma parallel parallel-clause new-line
   # pragma parallel for parallel-for-clause new-line
   # pragma parallel sections parallel-sections-clause new-line
*/
#define OMP_PARALLEL_CLAUSE_MASK			\
	( (1u << PRAGMA_OMP_CLAUSE_IF)			\
	| (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_DEFAULT)		\
	| (1u << PRAGMA_OMP_CLAUSE_SHARED)		\
	| (1u << PRAGMA_OMP_CLAUSE_COPYIN)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS))
/* Parse "#pragma omp parallel", including the combined
   "parallel for" / "parallel sections" forms.  For combined forms the
   clause mask is widened with the worksharing mask (minus nowait,
   which the combined forms forbid) and the parsed clauses are split
   between the parallel and the worksharing construct.  */
static tree
c_parser_omp_parallel (c_parser *parser)
{
  enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL;
  const char *p_name = "#pragma omp parallel";
  tree stmt, clauses, par_clause, ws_clause, block;
  unsigned int mask = OMP_PARALLEL_CLAUSE_MASK;
  if (c_parser_next_token_is_keyword (parser, RID_FOR))
    {
      c_parser_consume_token (parser);
      p_kind = PRAGMA_OMP_PARALLEL_FOR;
      p_name = "#pragma omp parallel for";
      mask |= OMP_FOR_CLAUSE_MASK;
      mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
    }
  else if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "sections") == 0)
	{
	  c_parser_consume_token (parser);
	  p_kind = PRAGMA_OMP_PARALLEL_SECTIONS;
	  p_name = "#pragma omp parallel sections";
	  mask |= OMP_SECTIONS_CLAUSE_MASK;
	  mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
	}
    }
  clauses = c_parser_omp_all_clauses (parser, mask, p_name);
  switch (p_kind)
    {
    case PRAGMA_OMP_PARALLEL:
      block = c_begin_omp_parallel ();
      c_parser_statement (parser);
      stmt = c_finish_omp_parallel (clauses, block);
      break;
    case PRAGMA_OMP_PARALLEL_FOR:
      block = c_begin_omp_parallel ();
      c_split_parallel_clauses (clauses, &par_clause, &ws_clause);
      stmt = c_parser_omp_for_loop (parser);
      if (stmt)
	OMP_FOR_CLAUSES (stmt) = ws_clause;
      stmt = c_finish_omp_parallel (par_clause, block);
      /* Mark the node as a combined construct for later phases.  */
      OMP_PARALLEL_COMBINED (stmt) = 1;
      break;
    case PRAGMA_OMP_PARALLEL_SECTIONS:
      block = c_begin_omp_parallel ();
      c_split_parallel_clauses (clauses, &par_clause, &ws_clause);
      stmt = c_parser_omp_sections_scope (parser);
      if (stmt)
	OMP_SECTIONS_CLAUSES (stmt) = ws_clause;
      stmt = c_finish_omp_parallel (par_clause, block);
      OMP_PARALLEL_COMBINED (stmt) = 1;
      break;
    default:
      gcc_unreachable ();
    }
  return stmt;
}
/* OpenMP 2.5:
   # pragma omp single single-clause[optseq] new-line
     structured-block
*/
#define OMP_SINGLE_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_COPYPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))
/* Parse "#pragma omp single": build the OMP_SINGLE node, parse its
   clauses and body, and add it to the current statement list.  */
static tree
c_parser_omp_single (c_parser *parser)
{
  tree stmt = make_node (OMP_SINGLE);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SINGLE_CLAUSES (stmt)
    = c_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
				"#pragma omp single");
  OMP_SINGLE_BODY (stmt) = c_parser_omp_structured_block (parser);
  return add_stmt (stmt);
}
/* Main entry point to parsing most OpenMP pragmas.  Dispatches on the
   pragma kind of the current token and attaches the pragma's source
   location to the resulting statement.  */
static void
c_parser_omp_construct (c_parser *parser)
{
  enum pragma_kind p_kind;
  location_t loc;
  tree stmt;
  loc = c_parser_peek_token (parser)->location;
  p_kind = c_parser_peek_token (parser)->pragma_kind;
  c_parser_consume_pragma (parser);
  /* For all constructs below except #pragma omp atomic
     MUST_NOT_THROW catch handlers are needed when exceptions
     are enabled.  */
  if (p_kind != PRAGMA_OMP_ATOMIC)
    c_maybe_initialize_eh ();
  switch (p_kind)
    {
    case PRAGMA_OMP_ATOMIC:
      /* atomic adds its statement itself and has no location to set.  */
      c_parser_omp_atomic (parser);
      return;
    case PRAGMA_OMP_CRITICAL:
      stmt = c_parser_omp_critical (parser);
      break;
    case PRAGMA_OMP_FOR:
      stmt = c_parser_omp_for (parser);
      break;
    case PRAGMA_OMP_MASTER:
      stmt = c_parser_omp_master (parser);
      break;
    case PRAGMA_OMP_ORDERED:
      stmt = c_parser_omp_ordered (parser);
      break;
    case PRAGMA_OMP_PARALLEL:
      stmt = c_parser_omp_parallel (parser);
      break;
    case PRAGMA_OMP_SECTIONS:
      stmt = c_parser_omp_sections (parser);
      break;
    case PRAGMA_OMP_SINGLE:
      stmt = c_parser_omp_single (parser);
      break;
    default:
      gcc_unreachable ();
    }
  if (stmt)
    SET_EXPR_LOCATION (stmt, loc);
}
/* OpenMP 2.5:
   # pragma omp threadprivate (variable-list)
   Mark each listed variable for thread-local storage, diagnosing
   variables that cannot legally be threadprivate.  */
static void
c_parser_omp_threadprivate (c_parser *parser)
{
  tree vars, t;
  c_parser_consume_pragma (parser);
  /* kind 0: collect the decls as a TREE_LIST rather than clauses.  */
  vars = c_parser_omp_var_list_parens (parser, 0, NULL);
  if (!targetm.have_tls)
    sorry ("threadprivate variables not supported in this target");
  /* Mark every variable in VARS to be assigned thread local storage.  */
  for (t = vars; t; t = TREE_CHAIN (t))
    {
      tree v = TREE_PURPOSE (t);
      /* If V had already been marked threadprivate, it doesn't matter
	 whether it had been used prior to this point.  */
      if (TREE_USED (v) && !C_DECL_THREADPRIVATE_P (v))
	error ("%qE declared %<threadprivate%> after first use", v);
      else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v))
	error ("automatic variable %qE cannot be %<threadprivate%>", v);
      else if (! COMPLETE_TYPE_P (TREE_TYPE (v)))
	error ("%<threadprivate%> %qE has incomplete type", v);
      else
	{
	  if (! DECL_THREAD_LOCAL_P (v))
	    {
	      DECL_TLS_MODEL (v) = decl_default_tls_model (v);
	      /* If rtl has been already set for this var, call
		 make_decl_rtl once again, so that encode_section_info
		 has a chance to look at the new decl flags.  */
	      if (DECL_RTL_SET_P (v))
		make_decl_rtl (v);
	    }
	  C_DECL_THREADPRIVATE_P (v) = 1;
	}
    }
  c_parser_skip_to_pragma_eol (parser);
}
/* Parse a single source file.  */
void
c_parse_file (void)
{
  /* Use local storage to begin.  If the first token is a pragma, parse it.
     If it is #pragma GCC pch_preprocess, then this will load a PCH file
     which will cause garbage collection.  */
  c_parser tparser;
  memset (&tparser, 0, sizeof tparser);
  the_parser = &tparser;
  if (c_parser_peek_token (&tparser)->pragma_kind == PRAGMA_GCC_PCH_PREPROCESS)
    c_parser_pragma_pch_preprocess (&tparser);
  /* Only after any PCH load (and its GC) is it safe to move the parser
     into GC-managed storage; copy the stack parser's state across.  */
  the_parser = GGC_NEW (c_parser);
  *the_parser = tparser;
  c_parser_translation_unit (the_parser);
  the_parser = NULL;
}
/* APPLE LOCAL begin CW asm blocks */
static void c_parser_iasm_statement (c_parser*);
static tree c_parser_iasm_identifier_or_number (c_parser*);
/* Return true if the next token is at the beginning of a line (BOL),
   peeking one token ahead by hand when the lookahead buffer is empty.  */
static bool
c_parser_iasm_bol (c_parser *parser)
{
  location_t loc;
  c_token *token;
  /* We can't use c_parser_peek_token here, as it will give errors for things like
     1st in MS-style asm. */
  if (parser->tokens_avail == 0)
    {
      /* Preserve input_location across the manual lex.  */
      loc = input_location;
      parser->tokens_avail = 1;
      c_lex_one_token (&parser->tokens[0], parser);
      input_location = loc;
    }
  token = &parser->tokens[0];
  return (token->flags & BOL) != 0;
}
/* (in 4.2 ao) */
/* In MS-style asm, a ';' starts a comment running to end of line;
   consume it and discard the rest of the line.  No-op otherwise.  */
static void
c_parser_iasm_maybe_skip_comments (c_parser *parser)
{
  if (flag_ms_asms
      && c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      /* Eat the ';', then skip rest of characters on this line. */
      c_parser_consume_token (parser);
      /* iasm_skip_to_eol works at the lexer level, so the token
	 lookahead buffer must be empty.  */
      gcc_assert (parser->tokens_avail == 0);
      iasm_skip_to_eol ();
    }
}
/* (in 4.2 ap) */
/* (in 4.2 ax) */
/* Parse an asm line.  The first token cannot be at the beginning of
   the line.  Statements on the line are separated by ';' (unless
   MS-style, where ';' begins a comment) or by the `asm' keyword.  */
static void
c_parser_iasm_statement_seq_opt (c_parser* parser)
{
  int check;
  /* Scan statements until there aren't any more. */
  while (true)
    {
      check = 0;
      /* Semicolons divide up individual statements. */
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  /* ; denotes comments in MS-style asms. */
	  if (flag_ms_asms)
	    {
	      c_parser_iasm_maybe_skip_comments (parser);
	      return;
	    }
	  c_parser_consume_token (parser);
	}
      else if (c_parser_next_token_is_keyword (parser, RID_ASM))
	{
	  c_parser_consume_token (parser);
	}
      else
	{
	  /* Parse a single statement. */
	  c_parser_iasm_statement (parser);
	  /* Resynchronize from c_parser_iasm_bol. */
	  input_location = c_parser_peek_token (parser)->location;
	  /* Remember that a statement was just parsed so a missing
	     separator can be diagnosed below.  */
	  check = 1;
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
	  || c_parser_next_token_is (parser, CPP_EOF)
	  /* We parse at most, one line. */
	  || c_parser_iasm_bol (parser))
	return;
      if (check
	  && !(c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
	       || c_parser_next_token_is (parser, CPP_SEMICOLON)
	       || c_parser_next_token_is_keyword (parser, RID_ASM)
	       || c_parser_iasm_bol (parser)))
	{
	  c_parser_error (parser, "expected %<;%> or %<}%> %<asm%> or end-of-line");
	}
    }
  /* NOTE(review): this tail is unreachable — every exit from the loop
     above is a `return' — presumably a leftover from an earlier shape
     of the loop.  */
  if (!c_parser_iasm_bol (parser))
    c_parser_iasm_maybe_skip_comments (parser);
}
/* (in 4.2 au) */
/* Parse one line of asm: simply an optional statement sequence.  */
static void
c_parser_iasm_line (c_parser* parser)
{
  c_parser_iasm_statement_seq_opt (parser);
}
/* (in 4.2 au) */
/* Parse an (optional) line-seq.
   line-seq:
     line
     line-seq [opt] line */
static void
c_parser_iasm_line_seq_opt (c_parser* parser)
{
  /* Keep parsing asm lines until a closing `}' or end-of-file ends
     the sequence.  */
  while (!c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
	 && !c_parser_next_token_is (parser, CPP_EOF))
    c_parser_iasm_line (parser);
}
/* (in 4.2 at) */
/* (in 4.2 av) */
/* (in 4.2 aw) */
/* This is the section of CW-asm-specific parsing functions. */
/* Parse a brace-enclosed block of CW asm: enter asm state, parse the
   line sequence, close the compound statement and leave asm state.  */
static void
c_parser_iasm_compound_statement (c_parser *parser)
{
  tree stmt;
  /* Flip the parser's global iasm mode on for the duration of the block.  */
  iasm_state = iasm_asm;
  inside_iasm_block = true;
  iasm_kill_regs = true;
  stmt = c_begin_compound_stmt (true);
  /* Parse an (optional) statement-seq. */
  c_parser_iasm_line_seq_opt (parser);
  add_stmt (c_end_compound_stmt (stmt, true));
  /* Consume the `}'. */
  c_parser_require (parser, CPP_CLOSE_BRACE, "expected %<}%>");
  /* We're done with the block of asm. */
  /* (in 4.2 ay) */
  iasm_end_block ();
  iasm_state = iasm_none;
}
/* Parse a single top-level CW asm statement (the one-line `asm ...'
   form): enter asm state, parse at most one line, and leave asm state.  */
static void
c_parser_iasm_top_statement (c_parser *parser)
{
  tree stmt;
  iasm_state = iasm_asm;
  inside_iasm_block = true;
  iasm_kill_regs = true;
  stmt = c_begin_compound_stmt (true);
  /* If the next token already starts a new line there is no statement
     to parse.  */
  if (!c_parser_iasm_bol (parser))
    {
      /* Parse a line. */
      c_parser_iasm_line (parser);
    }
  add_stmt (c_end_compound_stmt (stmt, true));
  /* We're done with the block of asm. */
  iasm_end_block ();
  iasm_state = iasm_none;
}
/* Build an identifier comprising the string passed and the
   next token.  STR is the already-seen prefix (e.g. ".");
   the next identifier-or-number token is appended to it.  */
static tree
iasm_build_identifier_string (c_parser* parser, const char* str)
{
  char *buf;
  int len;
  tree id;
  /* ".short" and ".long" need special casing because "short" and
     "long" are C keywords; only fuse when no whitespace separates
     the "." from the keyword.  */
  if (strcmp (str, ".") == 0
      && (c_parser_peek_token (parser)->flags & PREV_WHITE) == 0)
    {
      if (c_parser_next_token_is_keyword (parser, RID_SHORT))
	{
	  c_parser_consume_token (parser);
	  return get_identifier (".short");
	}
      if (c_parser_next_token_is_keyword (parser, RID_LONG))
	{
	  c_parser_consume_token (parser);
	  return get_identifier (".long");
	}
    }
  id = c_parser_iasm_identifier_or_number (parser);
  /* Concatenate STR and ID into a stack buffer: STR bytes, then the
     identifier's bytes, then a NUL terminator.  */
  len = strlen (str);
  buf = (char *) alloca (IDENTIFIER_LENGTH (id) + len + 1);
  memcpy (buf, str, len);
  memcpy (buf+len, IDENTIFIER_POINTER (id), IDENTIFIER_LENGTH (id));
  buf[IDENTIFIER_LENGTH (id) + len] = 0;
  return get_identifier (buf);
}
/* Parse a plain identifier token and return its IDENTIFIER_NODE,
   or error_mark_node (after a diagnostic) when the next token is
   not an identifier.  */
static tree
c_parser_identifier (c_parser* parser)
{
  /* Grab the value before c_parser_require consumes the token.  */
  tree id = c_parser_peek_token (parser)->value;
  if (c_parser_require (parser, CPP_NAME, "expected identifier"))
    return id;
  return error_mark_node;
}
/* (in 4.2 aq) */
/* Parse a CW asm identifier.  Returns an IDENTIFIER_NODE representing
   the identifier.  The CW asm identifiers include [.+-] as part of
   the identifier.  */
static tree
c_parser_iasm_identifier (c_parser* parser)
{
  c_token *token;
  tree t;
  const char *str = "";
  /* We have to accept certain keywords. */
  token = c_parser_peek_token (parser);
  if (token->flags & NAMED_OP)
    {
      /* C++-style named operators (and, bitor, ...) lex as operator
	 tokens; map them back to their spelled names.  */
      const char *s = 0;
      switch (token->type) {
      case CPP_AND_AND: s="and"; break;
      case CPP_AND_EQ: s="and_eq"; break;
      case CPP_AND: s="bitand"; break;
      case CPP_OR: s="bitor"; break;
      case CPP_COMPL: s="compl"; break;
      case CPP_NOT: s="not"; break;
      case CPP_NOT_EQ: s="not_eq"; break;
      case CPP_OR_OR: s="or"; break;
      case CPP_OR_EQ: s="or_eq"; break;
      case CPP_XOR: s="xor"; break;
      case CPP_XOR_EQ: s="xor_eq"; break;
      default: break;
      }
      /* The above list is the entire list of named operators.  We
	 can't fail to translate the name.  See operator_array in
	 libcpp/init.c. */
      gcc_assert (s != 0);
      c_parser_consume_token (parser);
      t = get_identifier (s);
    }
  else if (token->type == CPP_DOT)
    {
      /* .align */
      c_parser_consume_token (parser);
      t = iasm_build_identifier_string (parser, ".");
    }
  else if (token->value
	   && IASM_SEE_OPCODE (TYPESPEC, token->value) == IDENTIFIER)
    {
      /* A type-specifier keyword that is actually an opcode name in
	 asm context: take its identifier directly.  */
      t = token->value;
      c_parser_consume_token (parser);
    }
  else
    t = c_parser_identifier (parser);
  if (t == error_mark_node)
    return t;
  /* A trailing [.+-] (or ++/--) glued to the identifier extends it.  */
  token = c_parser_peek_token (parser);
  switch (token->type)
    {
    case CPP_DOT:
      str = ".";
      break;
    case CPP_PLUS:
      str = "+";
      break;
    case CPP_MINUS:
      str = "-";
      break;
    case CPP_PLUS_PLUS:
      str = "++";
      break;
    case CPP_MINUS_MINUS:
      str = "--";
      break;
    default:
      return t;
    }
  /* If there was whitespace between the identifier and the [.+-]
     character, then that character can't be part of the
     identifier.  */
  if (token->flags & PREV_WHITE)
    return t;
  c_parser_consume_token (parser);
  return iasm_get_identifier (t, str);
}
/* Parse either an identifier or an integer constant.  An integer
   constant is converted to its decimal spelling and returned as an
   IDENTIFIER_NODE so it can be glued into assembly text.  */
static tree
c_parser_iasm_identifier_or_number (c_parser* parser)
{
c_token *token;
token = c_parser_peek_token (parser);
if (token->type == CPP_NUMBER
&& TREE_CODE (token->value) == INTEGER_CST)
{
char buf[60];
/* NOTE(review): prints with the unsigned format although
   tree_low_cst is called with pos == 0 (signed extraction) —
   presumably intentional for asm spellings; confirm the intended
   rendering of negative constants.  */
sprintf (buf, HOST_WIDE_INT_PRINT_UNSIGNED, tree_low_cst (token->value, 0));
c_parser_consume_token (parser);
return get_identifier (buf);
}
return c_parser_identifier (parser);
}
/* (in 4.2 an) */
/* If ID is an instruction prefix (e.g. lock, rep), keep collecting
   prefixes until a non-prefix opcode or the end of the line is seen.
   Returns either the plain opcode identifier, or a TREE_LIST whose
   first element is the opcode followed by the gathered prefixes.  */
static tree
c_parser_iasm_maybe_prefix (c_parser *parser, tree id)
{
  tree prefixes = NULL_TREE;

  while (iasm_is_prefix (id) && !c_parser_iasm_bol (parser))
    {
      prefixes = tree_cons (NULL_TREE, id, prefixes);
      id = c_parser_iasm_identifier (parser);
    }
  return prefixes ? tree_cons (NULL_TREE, id, prefixes) : id;
}
/* Parse one CW asm operand: a binary expression, optionally followed
   by one or more parenthesized expressions denoting register-offset
   addressing (e.g. "4(r3)").  */
static tree
c_parser_iasm_operand (c_parser *parser)
{
tree operand;
/* Jump into the usual operand precedence stack. */
operand = c_parser_binary_expression (parser, false).value;
/* (in 4.2 bd) */
while (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
{
struct c_expr op2;
c_parser_consume_token (parser);
op2 = c_parser_expr_no_commas (parser, NULL);
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
/* Fold the parenthesized part into a register+offset operand.  */
operand = iasm_build_register_offset (operand, op2.value);
}
return operand;
}
/* Eat tokens until we get back to something we recognize: the start
   of a new line, `;', `}', end of file, or the `asm' keyword.  */
static void
c_parser_iasm_skip_to_next_asm (c_parser *parser)
{
  c_token *token;

  do
    {
      /* Re-peek on every iteration: c_parser_consume_token advances
	 the parser, so a token peeked before the previous consume is
	 stale.  The old code peeked exactly once before the loop and
	 kept testing that stale token while consuming, which could
	 skip past the intended stopping point.  */
      token = c_parser_peek_token (parser);
      if (c_parser_iasm_bol (parser)
	  || token->type == CPP_SEMICOLON
	  || token->type == CPP_CLOSE_BRACE
	  || token->type == CPP_EOF
	  || token->keyword == RID_ASM)
	return;
      c_parser_consume_token (parser);
    }
  while (1);
}
/* (in 4.2 az) */
/* Parse the comma-separated operand list of one asm statement.
   Returns a TREE_LIST of operands, or NULL_TREE after skipping to the
   next recognizable point when an operand fails to parse.  */
static tree
c_parser_iasm_operands (c_parser *parser)
{
tree operands = NULL_TREE, operand;
while (true)
{
/* If we're looking at the end of the line, then we've run out of operands. */
if (c_parser_iasm_bol (parser)
|| c_parser_next_token_is (parser, CPP_SEMICOLON)
|| c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
|| c_parser_next_token_is (parser, CPP_EOF)
|| c_parser_next_token_is_keyword (parser, RID_ASM))
break;
operand = c_parser_iasm_operand (parser);
if (operand && operand != error_mark_node)
{
/* Append the operand and swallow a trailing comma, if any.  */
operands = chainon (operands, build_tree_list (NULL_TREE, operand));
if (c_parser_next_token_is (parser, CPP_COMMA))
c_parser_consume_token (parser);
}
else
{
/* Error recovery: discard the whole operand list.  */
c_parser_iasm_skip_to_next_asm (parser);
return NULL_TREE;
}
}
return operands;
}
/* (in 4.2 ar) */
/* A single statement consists of one or more labels (identified by a
leading '@' and/or a trailing ':'), optionally followed by opcode
and operands.  An "entry" pseudo-op with optional static/extern
storage class is handled specially.  */
static void
c_parser_iasm_statement (c_parser* parser)
{
tree aname, anothername, operands;
/* (in 4.2 ax) */
int iasm_lineno = input_line;
/* Keep sucking labels from the front of the statement until a
non-label is seen. */
while (true)
{
if (c_parser_next_token_is (parser, CPP_SEMICOLON)
|| c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
|| c_parser_next_token_is (parser, CPP_EOF))
break;
if (c_parser_next_token_is (parser, CPP_PRAGMA))
{
/* Pragmas are allowed between asm statements.  */
c_parser_pragma (parser, pragma_compound);
}
else if (c_parser_next_token_is (parser, CPP_ATSIGN))
{
/* '@name' is a label; the '@' is not part of the name.  */
c_parser_consume_token (parser);
aname = c_parser_iasm_identifier_or_number (parser);
/* Optional ':' after a label. */
if (c_parser_next_token_is (parser, CPP_COLON))
c_parser_consume_token (parser);
iasm_label (aname, true);
}
else
{
/* (in 4.2 an) */
aname = c_parser_iasm_identifier (parser);
if (c_parser_next_token_is (parser, CPP_COLON))
{
/* 'name:' is also a label.  */
c_parser_consume_token (parser);
iasm_label (aname, false);
}
else
{
/* Not a label: this is the opcode (or "entry").  */
enum rid scspec = RID_EXTERN;
if (strcmp (IDENTIFIER_POINTER (aname), "entry") == 0)
{
/* 'entry [static|extern] operand' declares an entry point;
   linkage defaults to extern.  */
if (c_parser_next_token_is_keyword (parser, RID_STATIC)
|| c_parser_next_token_is_keyword (parser, RID_EXTERN))
{
scspec = c_parser_peek_token (parser)->keyword;
c_parser_consume_token (parser);
}
anothername = c_parser_iasm_operand (parser);
iasm_entry (scspec, anothername);
}
else
{
/* Ordinary instruction: gather prefixes and operands, then
   emit the statement.  */
aname = c_parser_iasm_maybe_prefix (parser, aname);
iasm_in_operands = true;
operands = c_parser_iasm_operands (parser);
iasm_stmt (aname, operands, iasm_lineno);
}
if (c_parser_iasm_bol (parser))
return;
break;
}
}
if (c_parser_iasm_bol (parser))
return;
}
/* Eat any trailing comments on the line.  */
c_parser_iasm_maybe_skip_comments (parser);
}
/* APPLE LOCAL end CW asm blocks */
#include "gt-c-parser.h"
|
convolution_1x1_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // A 1x1 stride-1 convolution is a plain matrix multiply: collapse
    // the spatial dimensions into a single row and hand the blob to the
    // packed sgemm kernel.
    const int spatial = bottom_blob.w * bottom_blob.h;

    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = spatial;
    bottom_im2col.h = 1;

    im2col_sgemm_pack4_bf16s_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
// 1x1 stride-2 convolution (pack4, bf16 storage): first shrink the
// input by keeping every second pixel in both dimensions, then run the
// stride-1 sgemm path on the shrunk blob.
static void conv1x1s2_sgemm_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
// After emitting outw outputs we have consumed 2*outw input pixels of
// the current row; skip the remainder of that row plus the whole next
// row (stride 2 vertically).  x4 because elempack == 4 (bf16 lanes).
const int tailstep = (w - 2 * outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
// bf16 values are stored as raw 16-bit words, hence unsigned short.
const unsigned short* r0 = bottom_blob.channel(p);
unsigned short* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
// Copy every second pack-of-4 (input advances 8 shorts per output
// pixel); unrolled 4x, then 2x, then scalar tail.
for (; j + 3 < outw; j += 4)
{
uint16x4_t _v0 = vld1_u16(r0);
uint16x4_t _v1 = vld1_u16(r0 + 8);
uint16x4_t _v2 = vld1_u16(r0 + 16);
uint16x4_t _v3 = vld1_u16(r0 + 24);
uint16x8_t _v01 = vcombine_u16(_v0, _v1);
uint16x8_t _v23 = vcombine_u16(_v2, _v3);
vst1q_u16(outptr, _v01);
vst1q_u16(outptr + 8, _v23);
r0 += 32;
outptr += 16;
}
for (; j + 1 < outw; j += 2)
{
uint16x4_t _v0 = vld1_u16(r0);
uint16x4_t _v1 = vld1_u16(r0 + 8);
uint16x8_t _v = vcombine_u16(_v0, _v1);
vst1q_u16(outptr, _v);
r0 += 16;
outptr += 8;
}
for (; j < outw; j++)
{
uint16x4_t _v = vld1_u16(r0);
vst1_u16(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
GB_binop__gt_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__gt_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__gt_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_int16)
// A*D function (colscale): GB (_AxD__gt_int16)
// D*A function (rowscale): GB (_DxB__gt_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_int16)
// C=scalar+B GB (_bind1st__gt_int16)
// C=scalar+B' GB (_bind1st_tran__gt_int16)
// C=A+scalar GB (_bind2nd__gt_int16)
// C=A'+scalar GB (_bind2nd_tran__gt_int16)
// C type: bool
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_INT16 || GxB_NO_GT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B where C, A, B are all dense; the actual loop lives in the
   shared template.  Returns GrB_NO_VALUE when this operator/type
   combination is compiled out via GB_DISABLE.  */
GrB_Info GB (_Cdense_ewise3_noaccum__gt_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B, accumulating a sparse matrix into a dense matrix.  The body
   is `#if 0`-ed out by the generator for this operator (GT has no
   accumulate form), so the function is a successful no-op unless
   disabled entirely.  */
GrB_Info GB (_Cdense_accumB__gt_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b (scalar accumulate into a dense matrix).  As with accumB,
   the generator emits the body under `#if 0` for this operator, so
   this is a successful no-op unless the operator is disabled.  */
GrB_Info GB (_Cdense_accumb__gt_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = A*D: scale the columns of A by the diagonal matrix D, applying
   cij = (aij > djj).  The loop lives in the colscale template, which
   uses the Cx pointer declared below.  */
GrB_Info GB (_AxD__gt_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = D*B: scale the rows of B by the diagonal matrix D, applying
   cij = (dii > bij); the loop lives in the rowscale template.  */
GrB_Info GB (_DxB__gt_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
/* eWiseAdd: C = A+B (set union of patterns), optionally masked by M or
   its complement.  All real work is in GB_add_template.c; the WERK
   declarations provide scratch slicing space freed by GB_FREE_WORK.  */
GrB_Info GB (_AaddB__gt_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
/* eWiseMult method 08: C = A.*B (set intersection of patterns) where C
   is sparse or hypersparse, optionally masked; work is in the
   GB_emult_08 meta-template.  */
GrB_Info GB (_AemultB_08__gt_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
   bitmap/full.  GB_BINOP_FLIP is 0 for GT (z = y > x is handled by the
   LT operator instead), so only the non-flipped branch below is
   compiled in.  */
GrB_Info GB (_AemultB_02__gt_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and
   B bitmap/full; work is in the GB_emult_04 template.  */
GrB_Info GB (_AemultB_04__gt_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
/* eWiseMult with bitmap result: C = A.*B (optionally masked) where C
   is stored as a bitmap; work is in the bitmap emult template.  */
GrB_Info GB (_AemultB_bitmap__gt_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
/* Apply z = (x > bij) across all bnz entries of B, with the scalar x
   bound as the first operand.  Cx and Bx may alias.  Entries absent
   from the bitmap Bb (when non-NULL) are skipped via the GBB macro.  */
GrB_Info GB (_bind1st__gt_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    const int16_t x = (*((int16_t *) x_input)) ;
    const int16_t *Bx = (const int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (GBB (Bb, p))
        {
            int16_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x > bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
/* Apply z = (aij > y) across all anz entries of A, with the scalar y
   bound as the second operand.  Cx and Ax may alias.  Entries absent
   from the bitmap Ab (when non-NULL) are skipped via the GBB macro.  */
GrB_Info GB (_bind2nd__gt_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    const int16_t *Ax = (const int16_t *) Ax_input ;
    const int16_t y = (*((int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            int16_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij > y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name).
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
/* C = op (x, A'): transpose A and apply z = (x > aij) with the scalar
   bound as the first operand.  */
GrB_Info GB (_bind1st_tran__gt_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Generated-code boilerplate: restore GB_ATYPE for the rest of the
// file (redundant here since the value is unchanged).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name).
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
/* C = op (A', y): transpose A and apply z = (aij > y) with the scalar
   bound as the second operand.  */
GrB_Info GB (_bind2nd_tran__gt_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__tan_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__tan_fp64_fp64)
// op(A') function: GB (_unop_tran__tan_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = tan (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = tan (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = tan (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TAN || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = tan (Ax [p]) for every entry present in A.  When Ab is NULL
   the matrix is full and every position holds an entry; otherwise Ab
   is A's bitmap and zero slots are skipped (the caller has already
   copied A->b into C->b).  Cx and Ax may be aliased.  */
GrB_Info GB (_unop_apply__tan_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // bitmap case: skip positions that hold no entry
        if (Ab != NULL && !Ab [p]) continue ;
        Cx [p] = tan (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = tan (A'): transpose A, cast, and apply; the loop lives in the
   shared transpose template which consumes GB_CAST_OP.  */
GrB_Info GB (_unop_tran__tan_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sample_sections_barrier.c | /* Andre Augusto Giannotti Scota (https://sites.google.com/view/a2gs/) */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "openmp_util.h"
/*
 * Demonstrates OpenMP parallel sections: three independent sections
 * (a, b, c) run concurrently on separate threads, then join at the
 * implicit barrier that ends the sections region.
 *
 * Fix: the closing trace of section a printed "Start section a"
 * again; it now prints "End section a", matching sections b and c.
 */
int main(int argc, char *argv[])
{
	/* #pragma omp parallel num_threads(3) */
	/* omp_set_num_threads(2); */

	dumpEnviroment();

	printf("Starting...\n\n");

#pragma omp parallel sections
	{
#pragma omp section
		{
			DEBUG(printf("Start section a\n");)
			function_a(0);
			DEBUG(printf("End section a\n\n");)
		}

#pragma omp section
		{
			DEBUG(printf("Start section b (sleep)\n");)
			function_b(1);
			DEBUG(printf("End section b\n\n");)
		}

#pragma omp section
		{
			DEBUG(printf("Start section c\n");)
			function_c(0);
			DEBUG(printf("End section c\n\n");)
		}
		/* Implicit barrier here */
	}

	printf("End.\n");
	return(0);
}
|
CLHelper.h | //------------------------------------------
//--cambine:helper function for OpenCL
//--programmer: Jianbin Fang
//--date: 27/12/2010
//------------------------------------------
#ifndef _CL_HELPER_
#define _CL_HELPER_
#include <CL/cl.h>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
using std::string;
using std::ifstream;
using std::cerr;
using std::endl;
using std::cout;
//#pragma OPENCL EXTENSION cl_nv_compiler_options:enable
#define WORK_DIM 2 // work-items dimensions
// Bundle of the OpenCL objects shared by the helper functions below.
struct oclHandleStruct {
  cl_context context;              // OpenCL context
  cl_device_id *devices;           // device list (malloc'd in _clInit)
  cl_command_queue queue;          // command queue on the selected device
  cl_program program;              // program built from the kernel binary
  cl_int cl_status;                // last OpenCL status code
  std::string error_str;           // last error description
  std::vector<cl_kernel> kernel;   // created kernel objects
};
// Global handle plus configuration defaults, overridable from the
// command line via _clCmdParams (-g, -d).
struct oclHandleStruct oclHandles;
char kernel_file[100] = "Kernels.cl";
int total_kernels = 2;
string kernel_names[2] = {"BFS_1", "BFS_2"};
int work_group_size = 512;
int device_id_inused = 0; // deviced id used (default : 0)
// Loads the entire contents of `filename` into a malloc'd buffer.
// On success, *data owns the buffer (caller must free) and *size holds
// the number of bytes read; returns 0.  Returns -1 on any failure
// (bad arguments, open/seek/allocation failure).
int read_kernel_file(const char* filename, uint8_t** data, size_t* size) {
  if (nullptr == filename || nullptr == data || nullptr == size)
    return -1;
  // "rb": the kernel is binary data; text mode would corrupt it on
  // platforms that translate line endings.
  FILE* fp = fopen(filename, "rb");
  if (NULL == fp) {
    fprintf(stderr, "Failed to load kernel.\n");
    return -1;
  }
  if (fseek(fp, 0, SEEK_END) != 0) {
    fclose(fp);
    return -1;
  }
  long fsize = ftell(fp);
  if (fsize < 0) {  // ftell reports failure as -1; previously unchecked
    fclose(fp);
    return -1;
  }
  rewind(fp);
  *data = (uint8_t*)malloc((size_t)fsize);
  if (NULL == *data) {  // allocation failure was previously unchecked
    fclose(fp);
    return -1;
  }
  *size = fread(*data, 1, (size_t)fsize, fp);
  fclose(fp);
  return 0;
}
/*
 * Converts the contents of a file into a string.
 *
 * Returns the file's bytes on success.  Throws a std::string error
 * message when the file cannot be opened.  Any exception raised while
 * reading is reported on stderr and then converted into the same
 * thrown error string (preserving the original contract).
 */
std::string FileToString(const std::string fileName) {
  std::ifstream f(fileName.c_str(), std::ifstream::in | std::ifstream::binary);
  try {
    if (f.is_open()) {
      // Determine the size, then read the whole file in one call.
      f.seekg(0, std::ifstream::end);
      size_t size = (size_t)f.tellg();
      f.seekg(0, std::ifstream::beg);
      // Let std::string own the buffer: the old new[]/delete[] pair
      // leaked on exception, and its "if (!str)" null check after
      // new[] was dead code (new[] throws rather than returning NULL).
      std::string s(size, '\0');
      if (size > 0)
        f.read(&s[0], (std::streamsize)size);
      f.close();
      return s;
    }
  } catch (std::string msg) {
    std::cerr << "Exception caught in FileToString(): " << msg << std::endl;
    if (f.is_open())
      f.close();
  } catch (...) {
    std::cerr << "Exception caught in FileToString()" << std::endl;
    if (f.is_open())
      f.close();
  }
  std::string errorMsg = "FileToString()::Error: Unable to open file " + fileName;
  throw(errorMsg);
}
//---------------------------------------
// Read command line parameters
//
// Recognized options: -g <n> sets the work group size, -d <n> selects
// the device id.  Anything else is ignored.  Note: a missing option
// argument reaches a bare `throw;` with no active exception, which
// terminates the program (kept from the original behavior).
void _clCmdParams(int argc, char *argv[]) {
  // Start at 1: argv[0] is the program name, not an option (the old
  // code also dispatched on argv[0][1]).
  for (int i = 1; i < argc; ++i) {
    // Guard against empty argument strings: reading argv[i][1] when
    // argv[i][0] is the terminator would index past the string.
    if (argv[i][0] == '\0')
      continue;
    switch (argv[i][1]) {
    case 'g': //--g stands for size of work group
      if (++i < argc) {
        // %d matches the int targets; the old %u with an int* was
        // undefined behavior per the C/C++ standards.
        sscanf(argv[i], "%d", &work_group_size);
      } else {
        std::cerr << "Could not read argument after option " << argv[i - 1]
                  << std::endl;
        throw;
      }
      break;
    case 'd': //--d stands for device id used in computaion
      if (++i < argc) {
        sscanf(argv[i], "%d", &device_id_inused);
      } else {
        std::cerr << "Could not read argument after option " << argv[i - 1]
                  << std::endl;
        throw;
      }
      break;
    default:;
    }
  }
}
//---------------------------------------
// Initlize CL objects
//--description: there are 5 steps to initialize all the OpenCL objects needed
//--revised on 04/01/2011: get the number of devices and
// devices have no relationship with context
void _clInit() {
printf("_clInit()\n");
int DEVICE_ID_INUSED = device_id_inused;
cl_int resultCL;
oclHandles.context = NULL;
oclHandles.devices = NULL;
oclHandles.queue = NULL;
oclHandles.program = NULL;
cl_uint deviceListSize;
//-----------------------------------------------
//--cambine-1: find the available platforms and select one
cl_uint numPlatforms = 1;
cl_platform_id targetPlatform = NULL;
cl_platform_id *allPlatforms =
(cl_platform_id *)malloc(numPlatforms * sizeof(cl_platform_id));
resultCL = clGetPlatformIDs(numPlatforms, allPlatforms, NULL);
if (resultCL != CL_SUCCESS)
throw(string("InitCL()::Error: Getting platform ids (clGetPlatformIDs)"));
// Select the target platform. Default: first platform
targetPlatform = allPlatforms[0];
/*for (int i = 0; i < numPlatforms; i++)
{
char pbuff[128];
resultCL = clGetPlatformInfo( allPlatforms[i],
CL_PLATFORM_VENDOR,
sizeof(pbuff),
pbuff,
NULL);
if (resultCL != CL_SUCCESS)
throw (string("InitCL()::Error: Getting platform info (clGetPlatformInfo)"));
//printf("vedor is %s\n",pbuff);
}
free(allPlatforms);*/
//-----------------------------------------------
//--cambine-2: create an OpenCL context
/*cl_context_properties cprops[3] = { CL_CONTEXT_PLATFORM,
(cl_context_properties)targetPlatform, 0 };
oclHandles.context = clCreateContextFromType(cprops,
CL_DEVICE_TYPE_GPU,
NULL,
NULL,
&resultCL);
if ((resultCL != CL_SUCCESS) || (oclHandles.context == NULL))
throw (string("InitCL()::Error: Creating Context
(clCreateContextFromType)"));
//-----------------------------------------------
//--cambine-3: detect OpenCL devices
// First, get the size of device list
oclHandles.cl_status = clGetDeviceIDs(targetPlatform, CL_DEVICE_TYPE_GPU, 0,
NULL, &deviceListSize);
if(oclHandles.cl_status!=CL_SUCCESS){
throw(string("exception in _clInit -> clGetDeviceIDs"));
}
if (deviceListSize == 0)
throw(string("InitCL()::Error: No devices found."));
printf("OK1()\n");
//std::cout<<"device number:"<<deviceListSize<<std::endl;*/
// Now, allocate the device list
deviceListSize = 1;
oclHandles.devices =
(cl_device_id *)malloc(deviceListSize * sizeof(cl_device_id));
if (oclHandles.devices == 0)
throw(string("InitCL()::Error: Could not allocate memory."));
// Next, get the device list data
oclHandles.cl_status =
clGetDeviceIDs(targetPlatform, CL_DEVICE_TYPE_DEFAULT, deviceListSize,
oclHandles.devices, NULL);
if (oclHandles.cl_status != CL_SUCCESS) {
throw(string("exception in _clInit -> clGetDeviceIDs-2"));
}
oclHandles.context = clCreateContext(NULL, deviceListSize, oclHandles.devices,
NULL, NULL, &resultCL);
if ((resultCL != CL_SUCCESS) || (oclHandles.context == NULL))
throw(string("InitCL()::Error: Creating Context (clCreateContext)"));
//-----------------------------------------------
//--cambine-4: Create an OpenCL command queue
oclHandles.queue = clCreateCommandQueue(
oclHandles.context, oclHandles.devices[DEVICE_ID_INUSED], 0, &resultCL);
//printf("resultCL=%d, queue=0x%x\n", resultCL, oclHandles.queue);
if ((resultCL != CL_SUCCESS) || (oclHandles.queue == NULL))
throw(string("InitCL()::Creating Command Queue. (clCreateCommandQueue)"));
//-----------------------------------------------
//--cambine-5: Load CL file, build CL program object, create CL kernel object
/*std::string source_str = FileToString(kernel_file);
const char * source = source_str.c_str();
size_t sourceSize[] = { source_str.length() };*/
//oclHandles.program = clCreateProgramWithBuiltInKernels(
// oclHandles.context, 1, &oclHandles.devices[DEVICE_ID_INUSED],
// "BFS_1;BFS_2", &resultCL);
/*oclHandles.program = clCreateProgramWithSource(oclHandles.context,
1,
&source,
sourceSize,
&resultCL);*/
// read kernel binary from file
uint8_t *kernel_bin = NULL;
size_t kernel_size;
cl_int binary_status = 0;
if (0 != read_kernel_file("kernel.pocl", &kernel_bin, &kernel_size))
std::abort();
oclHandles.program = clCreateProgramWithBinary(
oclHandles.context, 1, &oclHandles.devices[DEVICE_ID_INUSED], &kernel_size, (const uint8_t**)&kernel_bin, &binary_status, &resultCL);
free(kernel_bin);
if ((resultCL != CL_SUCCESS) || (oclHandles.program == NULL))
throw(string("InitCL()::Error: Loading Binary into cl_program. "
"(clCreateProgramWithBinary)"));
// insert debug information
// std::string options= "-cl-nv-verbose"; //Doesn't work on AMD machines
// options += " -cl-nv-opt-level=3";
resultCL = clBuildProgram(oclHandles.program, deviceListSize,
oclHandles.devices, NULL, NULL, NULL);
if ((resultCL != CL_SUCCESS) || (oclHandles.program == NULL)) {
cerr << "InitCL()::Error: In clBuildProgram" << endl;
size_t length;
resultCL = clGetProgramBuildInfo(oclHandles.program,
oclHandles.devices[DEVICE_ID_INUSED],
CL_PROGRAM_BUILD_LOG, 0, NULL, &length);
if (resultCL != CL_SUCCESS)
throw(string("InitCL()::Error: Getting Program build "
"info(clGetProgramBuildInfo)"));
char *buffer = (char *)malloc(length);
resultCL = clGetProgramBuildInfo(
oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED],
CL_PROGRAM_BUILD_LOG, length, buffer, NULL);
if (resultCL != CL_SUCCESS)
throw(string("InitCL()::Error: Getting Program build "
"info(clGetProgramBuildInfo)"));
cerr << buffer << endl;
free(buffer);
throw(string("InitCL()::Error: Building Program (clBuildProgram)"));
}
// get program information in intermediate representation
#ifdef PTX_MSG
size_t binary_sizes[deviceListSize];
char *binaries[deviceListSize];
// figure out number of devices and the sizes of the binary for each device.
oclHandles.cl_status =
clGetProgramInfo(oclHandles.program, CL_PROGRAM_BINARY_SIZES,
sizeof(size_t) * deviceListSize, &binary_sizes, NULL);
if (oclHandles.cl_status != CL_SUCCESS) {
throw(string("--cambine:exception in _InitCL -> clGetProgramInfo-2"));
}
std::cout << "--cambine:" << binary_sizes << std::endl;
// copy over all of the generated binaries.
for (int i = 0; i < deviceListSize; i++)
binaries[i] = (char *)malloc(sizeof(char) * (binary_sizes[i] + 1));
oclHandles.cl_status =
clGetProgramInfo(oclHandles.program, CL_PROGRAM_BINARIES,
sizeof(char *) * deviceListSize, binaries, NULL);
if (oclHandles.cl_status != CL_SUCCESS) {
throw(string("--cambine:exception in _InitCL -> clGetProgramInfo-3"));
}
for (int i = 0; i < deviceListSize; i++)
binaries[i][binary_sizes[i]] = '\0';
std::cout << "--cambine:writing ptd information..." << std::endl;
FILE *ptx_file = fopen("cl.ptx", "w");
if (ptx_file == NULL) {
throw(string("exceptions in allocate ptx file."));
}
fprintf(ptx_file, "%s", binaries[DEVICE_ID_INUSED]);
fclose(ptx_file);
std::cout << "--cambine:writing ptd information done." << std::endl;
for (int i = 0; i < deviceListSize; i++)
free(binaries[i]);
#endif
for (int nKernel = 0; nKernel < total_kernels; nKernel++) {
/* get a kernel object handle for a kernel with the given name */
cl_kernel kernel = clCreateKernel(
oclHandles.program, (kernel_names[nKernel]).c_str(), &resultCL);
if ((resultCL != CL_SUCCESS) || (kernel == NULL)) {
string errorMsg = "InitCL()::Error: Creating Kernel (clCreateKernel) \"" +
kernel_names[nKernel] + "\"";
throw(errorMsg);
}
oclHandles.kernel.push_back(kernel);
}
// get resource alocation information
#ifdef RES_MSG
char *build_log;
size_t ret_val_size;
oclHandles.cl_status = clGetProgramBuildInfo(
oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED],
CL_PROGRAM_BUILD_LOG, 0, NULL, &ret_val_size);
if (oclHandles.cl_status != CL_SUCCESS) {
throw(string("exceptions in _InitCL -> getting resource information"));
}
build_log = (char *)malloc(ret_val_size + 1);
oclHandles.cl_status = clGetProgramBuildInfo(
oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED],
CL_PROGRAM_BUILD_LOG, ret_val_size, build_log, NULL);
if (oclHandles.cl_status != CL_SUCCESS) {
throw(string(
"exceptions in _InitCL -> getting resources allocation information-2"));
}
build_log[ret_val_size] = '\0';
std::cout << "--cambine:" << build_log << std::endl;
free(build_log);
#endif
}
//---------------------------------------
// release CL objects
void _clRelease() {
char errorFlag = false;
for (int nKernel = 0; nKernel < oclHandles.kernel.size(); nKernel++) {
if (oclHandles.kernel[nKernel] != NULL) {
cl_int resultCL = clReleaseKernel(oclHandles.kernel[nKernel]);
if (resultCL != CL_SUCCESS) {
cerr << "ReleaseCL()::Error: In clReleaseKernel" << endl;
errorFlag = true;
}
oclHandles.kernel[nKernel] = NULL;
printf("clReleaseKernel()\n");
}
}
if (oclHandles.program != NULL) {
cl_int resultCL = clReleaseProgram(oclHandles.program);
if (resultCL != CL_SUCCESS) {
cerr << "ReleaseCL()::Error: In clReleaseProgram" << endl;
errorFlag = true;
}
oclHandles.program = NULL;
printf("clReleaseProgram()\n");
}
if (oclHandles.queue != NULL) {
cl_int resultCL = clReleaseCommandQueue(oclHandles.queue);
if (resultCL != CL_SUCCESS) {
cerr << "ReleaseCL()::Error: In clReleaseCommandQueue" << endl;
errorFlag = true;
}
oclHandles.queue = NULL;
printf("clReleaseCommandQueue()\n");
}
if (oclHandles.context != NULL) {
cl_int resultCL = clReleaseContext(oclHandles.context);
if (resultCL != CL_SUCCESS) {
cerr << "ReleaseCL()::Error: In clReleaseContext" << endl;
errorFlag = true;
}
oclHandles.context = NULL;
printf("clReleaseContext()\n");
}
if (oclHandles.devices != NULL) {
cl_int resultCL = clReleaseDevice(oclHandles.devices[0]);
if (resultCL != CL_SUCCESS) {
cerr << "ReleaseCL()::Error: In clReleaseDevice" << endl;
errorFlag = true;
}
free(oclHandles.devices);
printf("clReleaseDevice()\n");
}
if (errorFlag)
throw(string("ReleaseCL()::Error encountered."));
}
//--------------------------------------------------------
//--cambine:create buffer and then copy data from host to device
cl_mem _clCreateAndCpyMem(int size, void *h_mem_source) throw(string) {
  // Allocate a read-only device buffer of `size` bytes and populate it from
  // h_mem_source in one call via CL_MEM_COPY_HOST_PTR.
  cl_mem device_buffer =
      clCreateBuffer(oclHandles.context,
                     CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, size,
                     h_mem_source, &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clCreateAndCpyMem()"));
#endif
  return device_buffer;
}
//-------------------------------------------------------
//--cambine: create read-write buffer (CL_MEM_READ_WRITE) initialized from host
//--date: 17/01/2011
cl_mem _clMallocRW(int size, void *h_mem_ptr) throw(string) {
  // Allocate a read-write device buffer of `size` bytes, initialized with a
  // copy of the host data at h_mem_ptr (CL_MEM_COPY_HOST_PTR).
  cl_mem device_buffer =
      clCreateBuffer(oclHandles.context,
                     CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR, size,
                     h_mem_ptr, &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clMallocRW"));
#endif
  return device_buffer;
}
//-------------------------------------------------------
//--cambine: create write-only buffer (CL_MEM_WRITE_ONLY) initialized from host
//--date: 17/01/2011
cl_mem _clMalloc(int size, void *h_mem_ptr) throw(string) {
  // Allocate a write-only device buffer of `size` bytes, seeded with a copy
  // of the host data at h_mem_ptr (CL_MEM_COPY_HOST_PTR).
  cl_mem device_buffer =
      clCreateBuffer(oclHandles.context,
                     CL_MEM_WRITE_ONLY | CL_MEM_COPY_HOST_PTR, size,
                     h_mem_ptr, &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clMalloc"));
#endif
  return device_buffer;
}
//-------------------------------------------------------
//--cambine: transfer data from host to device
//--date: 17/01/2011
void _clMemcpyH2D(cl_mem d_mem, int size, const void *h_mem_ptr) throw(string) {
  // Blocking host-to-device copy: `size` bytes from h_mem_ptr into d_mem.
  // CL_TRUE makes the call return only after the transfer has completed.
  oclHandles.cl_status =
      clEnqueueWriteBuffer(oclHandles.queue, d_mem, CL_TRUE, 0, size,
                           h_mem_ptr, 0, NULL, NULL);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clMemcpyH2D"));
#endif
}
//--------------------------------------------------------
//--cambine:create buffer and then copy data from host to device with pinned
// memory
cl_mem _clCreateAndCpyPinnedMem(int size, float *h_mem_source) throw(string) {
  // Create a device buffer and fill it from host memory, staging the data
  // through a pinned (page-locked) host buffer for faster transfer.
  // Returns the device buffer; the staging buffer is temporary.
  cl_mem d_mem, d_mem_pinned;
  float *h_mem_pinned = NULL;
  d_mem_pinned = clCreateBuffer(oclHandles.context,
                                CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, size,
                                NULL, &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clCreateAndCpyMem()->d_mem_pinned"));
#endif
  //------------
  d_mem = clCreateBuffer(oclHandles.context, CL_MEM_READ_ONLY, size, NULL,
                         &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clCreateAndCpyMem() -> d_mem "));
#endif
  //----------
  // Map the pinned buffer into host address space so we can fill it directly.
  h_mem_pinned = (cl_float *)clEnqueueMapBuffer(
      oclHandles.queue, d_mem_pinned, CL_TRUE, CL_MAP_WRITE, 0, size, 0, NULL,
      NULL, &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clCreateAndCpyMem() -> clEnqueueMapBuffer"));
#endif
  int element_number = size / sizeof(float);
#pragma omp parallel for
  for (int i = 0; i < element_number; i++) {
    h_mem_pinned[i] = h_mem_source[i];
  }
  //----------
  // Blocking write from the pinned staging area into the destination buffer.
  oclHandles.cl_status = clEnqueueWriteBuffer(
      oclHandles.queue, d_mem, CL_TRUE, 0, size, h_mem_pinned, 0, NULL, NULL);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clCreateAndCpyMem() -> clEnqueueWriteBuffer"));
#endif
  // BUGFIX: unmap and release the pinned staging buffer. Previously both the
  // mapping and d_mem_pinned leaked on every call.
  oclHandles.cl_status = clEnqueueUnmapMemObject(
      oclHandles.queue, d_mem_pinned, h_mem_pinned, 0, NULL, NULL);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(
        string("excpetion in _clCreateAndCpyMem() -> clEnqueueUnmapMemObject"));
#endif
  clReleaseMemObject(d_mem_pinned);
  return d_mem;
}
//--------------------------------------------------------
//--cambine:create write only buffer on device
cl_mem _clMallocWO(int size) throw(string) {
  // Allocate an uninitialized write-only device buffer of `size` bytes.
  cl_mem device_buffer = clCreateBuffer(oclHandles.context, CL_MEM_WRITE_ONLY,
                                        size, 0, &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clCreateMem()"));
#endif
  return device_buffer;
}
//--------------------------------------------------------
// transfer data from device to host
void _clMemcpyD2H(cl_mem d_mem, int size, void *h_mem) throw(string) {
  // Blocking device-to-host copy: `size` bytes from d_mem into h_mem.
  oclHandles.cl_status = clEnqueueReadBuffer(oclHandles.queue, d_mem, CL_TRUE,
                                             0, size, h_mem, 0, 0, 0);
#ifdef ERRMSG
  // Build a readable diagnostic from the status code; thrown only on failure.
  oclHandles.error_str = "excpetion in _clCpyMemD2H -> ";
  if (oclHandles.cl_status == CL_INVALID_COMMAND_QUEUE)
    oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE";
  else if (oclHandles.cl_status == CL_INVALID_CONTEXT)
    oclHandles.error_str += "CL_INVALID_CONTEXT";
  else if (oclHandles.cl_status == CL_INVALID_MEM_OBJECT)
    oclHandles.error_str += "CL_INVALID_MEM_OBJECT";
  else if (oclHandles.cl_status == CL_INVALID_VALUE)
    oclHandles.error_str += "CL_INVALID_VALUE";
  else if (oclHandles.cl_status == CL_INVALID_EVENT_WAIT_LIST)
    oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST";
  else if (oclHandles.cl_status == CL_MEM_OBJECT_ALLOCATION_FAILURE)
    oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE";
  else if (oclHandles.cl_status == CL_OUT_OF_HOST_MEMORY)
    oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
  else
    oclHandles.error_str += "Unknown reason";
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(oclHandles.error_str);
#endif
}
//--------------------------------------------------------
// set kernel arguments
void _clSetArgs(int kernel_id, int arg_idx, void *d_mem,
                int size = 0) throw(string) {
  // Set argument `arg_idx` of kernel `kernel_id`.
  //   size == 0 (default): d_mem is a cl_mem handle, passed by address with
  //                        sizeof(cl_mem).
  //   size != 0:           d_mem points to `size` bytes of raw argument data
  //                        (scalars, local-memory sizes, ...).
  if (!size) {
    oclHandles.cl_status = clSetKernelArg(oclHandles.kernel[kernel_id], arg_idx,
                                          sizeof(d_mem), &d_mem);
  } else {
    oclHandles.cl_status =
        clSetKernelArg(oclHandles.kernel[kernel_id], arg_idx, size, d_mem);
  }
#ifdef ERRMSG
  // FIX: this status-to-string switch was duplicated verbatim in both
  // branches above; it is now shared. Behavior is unchanged.
  oclHandles.error_str = "excpetion in _clSetKernelArg() ";
  switch (oclHandles.cl_status) {
  case CL_INVALID_KERNEL:
    oclHandles.error_str += "CL_INVALID_KERNEL";
    break;
  case CL_INVALID_ARG_INDEX:
    oclHandles.error_str += "CL_INVALID_ARG_INDEX";
    break;
  case CL_INVALID_ARG_VALUE:
    oclHandles.error_str += "CL_INVALID_ARG_VALUE";
    break;
  case CL_INVALID_MEM_OBJECT:
    oclHandles.error_str += "CL_INVALID_MEM_OBJECT";
    break;
  case CL_INVALID_SAMPLER:
    oclHandles.error_str += "CL_INVALID_SAMPLER";
    break;
  case CL_INVALID_ARG_SIZE:
    oclHandles.error_str += "CL_INVALID_ARG_SIZE";
    break;
  case CL_OUT_OF_RESOURCES:
    oclHandles.error_str += "CL_OUT_OF_RESOURCES";
    break;
  case CL_OUT_OF_HOST_MEMORY:
    oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
    break;
  default:
    oclHandles.error_str += "Unknown reason";
    break;
  }
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(oclHandles.error_str);
#endif
}
void _clFinish() throw(string) {
  // Block until every command queued on the helper's command queue has
  // finished executing.
  oclHandles.cl_status = clFinish(oclHandles.queue);
#ifdef ERRMSG
  oclHandles.error_str = "excpetion in _clFinish";
  if (oclHandles.cl_status == CL_INVALID_COMMAND_QUEUE)
    oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE";
  else if (oclHandles.cl_status == CL_OUT_OF_RESOURCES)
    oclHandles.error_str += "CL_OUT_OF_RESOURCES";
  else if (oclHandles.cl_status == CL_OUT_OF_HOST_MEMORY)
    oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
  else
    oclHandles.error_str += "Unknown reasons";
  if (oclHandles.cl_status != CL_SUCCESS) {
    throw(oclHandles.error_str);
  }
#endif
}
//--------------------------------------------------------
//--cambine:enqueue kernel
// Enqueue kernel `kernel_id` with a 1D problem size of `work_items` and a
// work-group size of `work_group_size`. The global size is rounded up to the
// next multiple of the group size (kernels are expected to bounds-check).
// Launch is asynchronous; callers synchronize via _clFinish()/_clMemcpyD2H().
void _clInvokeKernel(int kernel_id, int work_items,
                     int work_group_size) throw(string) {
  cl_uint work_dim = WORK_DIM;
  //cl_event e[1];
  if (work_items % work_group_size != 0) // process situations that work_items
                                         // cannot be divided by work_group_size
    work_items =
        work_items + (work_group_size - (work_items % work_group_size));
  // NOTE(review): int initializers in size_t arrays — narrowing, but values
  // are nonnegative here.
  size_t local_work_size[] = {work_group_size, 1};
  size_t global_work_size[] = {work_items, 1};
  oclHandles.cl_status = clEnqueueNDRangeKernel(
      oclHandles.queue, oclHandles.kernel[kernel_id], work_dim, 0,
      global_work_size, local_work_size, 0, 0, NULL);
#ifdef ERRMSG
  // Map the enqueue status to a readable message; thrown only on failure.
  oclHandles.error_str = "excpetion in _clInvokeKernel() -> ";
  switch (oclHandles.cl_status) {
  case CL_INVALID_PROGRAM_EXECUTABLE:
    oclHandles.error_str += "CL_INVALID_PROGRAM_EXECUTABLE";
    break;
  case CL_INVALID_COMMAND_QUEUE:
    oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE";
    break;
  case CL_INVALID_KERNEL:
    oclHandles.error_str += "CL_INVALID_KERNEL";
    break;
  case CL_INVALID_CONTEXT:
    oclHandles.error_str += "CL_INVALID_CONTEXT";
    break;
  case CL_INVALID_KERNEL_ARGS:
    oclHandles.error_str += "CL_INVALID_KERNEL_ARGS";
    break;
  case CL_INVALID_WORK_DIMENSION:
    oclHandles.error_str += "CL_INVALID_WORK_DIMENSION";
    break;
  case CL_INVALID_GLOBAL_WORK_SIZE:
    oclHandles.error_str += "CL_INVALID_GLOBAL_WORK_SIZE";
    break;
  case CL_INVALID_WORK_GROUP_SIZE:
    oclHandles.error_str += "CL_INVALID_WORK_GROUP_SIZE";
    break;
  case CL_INVALID_WORK_ITEM_SIZE:
    oclHandles.error_str += "CL_INVALID_WORK_ITEM_SIZE";
    break;
  case CL_INVALID_GLOBAL_OFFSET:
    oclHandles.error_str += "CL_INVALID_GLOBAL_OFFSET";
    break;
  case CL_OUT_OF_RESOURCES:
    oclHandles.error_str += "CL_OUT_OF_RESOURCES";
    break;
  case CL_MEM_OBJECT_ALLOCATION_FAILURE:
    oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE";
    break;
  case CL_INVALID_EVENT_WAIT_LIST:
    oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST";
    break;
  case CL_OUT_OF_HOST_MEMORY:
    oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
    break;
  default:
    oclHandles.error_str += "Unkown reseason";
    break;
  }
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(oclHandles.error_str);
#endif
  //_clFinish();
  // oclHandles.cl_status = clWaitForEvents(1, &e[0]);
  // #ifdef ERRMSG
  // if (oclHandles.cl_status!= CL_SUCCESS)
  //   throw(string("excpetion in _clEnqueueNDRange() -> clWaitForEvents"));
  // #endif
}
// Enqueue kernel `kernel_id` over a 2D range of (range_x, range_y) work-items
// with (group_x, group_y) work-groups. Unlike the 1D variant, the global size
// is NOT rounded up — callers must pass multiples of the group size.
// Launch is asynchronous.
void _clInvokeKernel2D(int kernel_id, int range_x, int range_y, int group_x,
                       int group_y) throw(string) {
  cl_uint work_dim = WORK_DIM;
  size_t local_work_size[] = {group_x, group_y};
  size_t global_work_size[] = {range_x, range_y};
  //cl_event e[1];
  /*if(work_items%work_group_size != 0)  //process situations that work_items
    cannot be divided by work_group_size
    work_items = work_items + (work_group_size-(work_items%work_group_size));*/
  oclHandles.cl_status = clEnqueueNDRangeKernel(
      oclHandles.queue, oclHandles.kernel[kernel_id], work_dim, 0,
      global_work_size, local_work_size, 0, 0, NULL);
#ifdef ERRMSG
  // Map the enqueue status to a readable message; thrown only on failure.
  oclHandles.error_str = "excpetion in _clInvokeKernel() -> ";
  switch (oclHandles.cl_status) {
  case CL_INVALID_PROGRAM_EXECUTABLE:
    oclHandles.error_str += "CL_INVALID_PROGRAM_EXECUTABLE";
    break;
  case CL_INVALID_COMMAND_QUEUE:
    oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE";
    break;
  case CL_INVALID_KERNEL:
    oclHandles.error_str += "CL_INVALID_KERNEL";
    break;
  case CL_INVALID_CONTEXT:
    oclHandles.error_str += "CL_INVALID_CONTEXT";
    break;
  case CL_INVALID_KERNEL_ARGS:
    oclHandles.error_str += "CL_INVALID_KERNEL_ARGS";
    break;
  case CL_INVALID_WORK_DIMENSION:
    oclHandles.error_str += "CL_INVALID_WORK_DIMENSION";
    break;
  case CL_INVALID_GLOBAL_WORK_SIZE:
    oclHandles.error_str += "CL_INVALID_GLOBAL_WORK_SIZE";
    break;
  case CL_INVALID_WORK_GROUP_SIZE:
    oclHandles.error_str += "CL_INVALID_WORK_GROUP_SIZE";
    break;
  case CL_INVALID_WORK_ITEM_SIZE:
    oclHandles.error_str += "CL_INVALID_WORK_ITEM_SIZE";
    break;
  case CL_INVALID_GLOBAL_OFFSET:
    oclHandles.error_str += "CL_INVALID_GLOBAL_OFFSET";
    break;
  case CL_OUT_OF_RESOURCES:
    oclHandles.error_str += "CL_OUT_OF_RESOURCES";
    break;
  case CL_MEM_OBJECT_ALLOCATION_FAILURE:
    oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE";
    break;
  case CL_INVALID_EVENT_WAIT_LIST:
    oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST";
    break;
  case CL_OUT_OF_HOST_MEMORY:
    oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
    break;
  default:
    oclHandles.error_str += "Unkown reseason";
    break;
  }
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(oclHandles.error_str);
#endif
  //_clFinish();
  /*oclHandles.cl_status = clWaitForEvents(1, &e[0]);
  #ifdef ERRMSG
  if (oclHandles.cl_status!= CL_SUCCESS)
    throw(string("excpetion in _clEnqueueNDRange() -> clWaitForEvents"));
  #endif*/
}
//--------------------------------------------------------
// release OpenCL objects
void _clFree(cl_mem ob) throw(string) {
  // Release a device buffer; a NULL handle is a no-op.
  // BUGFIX: the ERRMSG status check used to run even when ob was NULL,
  // inspecting a stale cl_status left over from an earlier API call and
  // potentially throwing for an operation that never happened. The whole
  // body is now guarded by the NULL check.
  if (ob == NULL)
    return;
  oclHandles.cl_status = clReleaseMemObject(ob);
#ifdef ERRMSG
  oclHandles.error_str = "excpetion in _clFree() ->";
  switch (oclHandles.cl_status) {
  case CL_INVALID_MEM_OBJECT:
    oclHandles.error_str += "CL_INVALID_MEM_OBJECT";
    break;
  case CL_OUT_OF_RESOURCES:
    oclHandles.error_str += "CL_OUT_OF_RESOURCES";
    break;
  case CL_OUT_OF_HOST_MEMORY:
    oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
    break;
  default:
    oclHandles.error_str += "Unkown reseason";
    break;
  }
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(oclHandles.error_str);
#endif
}
#endif //_CL_HELPER_
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Define declarations.
*/
/* Tuning constants for path tracing (used by the Trace* methods below). */
#define BezierQuantum 200
#define PrimitiveExtentPad 2048
#define MaxBezierCoordinates 4194304
/*
  Log a DrawError exception for a malformed point token, mark the operation
  failed (status=MagickFalse), and break out of the enclosing loop/switch.
*/
#define ThrowPointExpectedException(image,token) \
{ \
  (void) ThrowMagickException(&(image)->exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}
/*
Typedef declarations.
*/
/*
  One polygon edge in sorted rendering form: a run of points with its
  bounding box, current scanline position, winding direction and ghostline
  (transparent) flag. `highwater` caches a scan position between scanlines.
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;

  double
    scanline;

  PointInfo
    *points;

  size_t
    number_points;

  ssize_t
    direction;

  MagickBooleanType
    ghostline;

  size_t
    highwater;
} EdgeInfo;

/* An ellipse element: center (cx,cy), axes (major,minor) and rotation. */
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/*
  Mutable state threaded through the MVG tracing routines: the growable
  primitive array, its current extent and write offset, the current point,
  and the exception sink.
*/
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;

  size_t
    *extent;

  ssize_t
    offset;

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;

/* A polygon as a sorted table of edges (see ConvertPathToPolygon). */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/* Per-point opcodes produced while flattening a path. */
typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

/* A flattened path element: a point plus its opcode. */
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(Image *,MVGInfo *,const char *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  /*
    Allocate a DrawInfo structure and initialize it to default values.
  */
  DrawInfo
    *draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));

  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  /*
    Deep-copy draw_info into a freshly initialized DrawInfo. Scalars are
    assigned; strings, images, the dash-pattern array and gradient stops are
    cloned so the result owns its own storage. A NULL draw_info yields a
    default-initialized structure.
  */
  DrawInfo
    *clone_info;

  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /* fill_pattern wins over the deprecated tile; only one is cloned. */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      &draw_info->fill_pattern->exception);
  else
    if (draw_info->tile != (Image *) NULL)
      clone_info->fill_pattern=CloneImage(draw_info->tile,0,0,MagickTrue,
        &draw_info->tile->exception);
  clone_info->tile=NewImageList();  /* tile is deprecated */
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,&draw_info->stroke_pattern->exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /* The dash array is terminated by a (near-)zero entry; count to it. */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      /* Allocate 2*x+2 zeroed slots, then copy the x values + terminator. */
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): exception tag looks copy-pasted from the dash-pattern
           branch above; a gradient-stop allocation failure is reported as
           "UnableToAllocateDashPattern" — confirm intended tag. */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_opacity=draw_info->fill_opacity;
  clone_info->stroke_opacity=draw_info->stroke_opacity;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,&draw_info->clipping_mask->exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,&draw_info->composite_mask->exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator for EdgeInfo records: orders edges by first-point y,
  then first-point x, then slope (via cross-product to avoid division),
  then second-point y and x. Returns -1/0/1.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  /* Return early with -1/1 on the first differing key; fall through on tie. */
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  register const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  /* Compare slopes with a cross-product: (dx_p*dy_q) vs (dy_p*dx_q). */
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    swap;

  register ssize_t
    head,
    tail;

  /*
    Reverse the point array in place by swapping elements from both ends
    toward the middle.  A zero- or one-element array is left untouched.
  */
  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
static PolygonInfo *DestroyPartialPolygonInfo(PolygonInfo *polygon_info,
  const size_t number_edges)
{
  register ssize_t
    i;

  /*
    Release a partially constructed PolygonInfo: the first number_edges
    per-edge point arrays, the edge array (if any), and the structure
    itself.  Always returns NULL so error paths can simply
    'return(DestroyPartialPolygonInfo(...))'.
  */
  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      for (i=0; i < (ssize_t) number_edges; i++)
        polygon_info->edges[i].points=(PointInfo *)
          RelinquishMagickMemory(polygon_info->edges[i].points);
      polygon_info->edges=(EdgeInfo *)
        RelinquishMagickMemory(polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form.  Each
    monotone run of points (same y-direction) becomes one EdgeInfo; edges
    are stored with their points ordered top-to-bottom and finally sorted
    with DrawCompareEdges().  On any allocation failure all memory acquired
    so far is released and NULL is returned (the original code leaked the
    structure, the edge array and pending point lists on these paths).
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return(DestroyPartialPolygonInfo(polygon_info,0));
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: close out the pending edge (if it has at least two
          points) and start a fresh point list.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  {
                    /*
                      ResizeQuantumMemory() frees the old edge array on
                      failure, so only the pending point list and the
                      structure remain to be released here.
                    */
                    points=(PointInfo *) RelinquishMagickMemory(points);
                    return(DestroyPartialPolygonInfo(polygon_info,0));
                  }
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return(DestroyPartialPolygonInfo(polygon_info,edge));
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to: a change of vertical direction closes the current edge and
      opens a new one starting at the last point.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              {
                points=(PointInfo *) RelinquishMagickMemory(points);
                return(DestroyPartialPolygonInfo(polygon_info,0));
              }
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        edge++;  /* bump before the alloc so cleanup covers this edge */
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return(DestroyPartialPolygonInfo(polygon_info,edge));
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return(DestroyPartialPolygonInfo(polygon_info,edge));
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          /*
            Flush the final pending edge.
          */
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                {
                  points=(PointInfo *) RelinquishMagickMemory(points);
                  return(DestroyPartialPolygonInfo(polygon_info,0));
                }
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
  const char
    *code_name;

  register const PathInfo
    *p;

  /*
    Emit the vector path to the drawing debug log, one point per line,
    labelled with the path-code mnemonic.
  */
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    switch (p->code)
    {
      case GhostlineCode: code_name="moveto ghostline"; break;
      case OpenCode: code_name="moveto open"; break;
      case MoveToCode: code_name="moveto"; break;
      case LineToCode: code_name="lineto"; break;
      default: code_name="?"; break;
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,code_name);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(
  const DrawInfo *magick_unused(draw_info),const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,  /* first point of the current subpath */
    q;  /* last point emitted (used to drop duplicates) */

  register ssize_t
    i,
    n;  /* number of path entries written so far */

  ssize_t
    coordinates,  /* points remaining in the current subpath */
    start;        /* index of the current subpath's first path entry */

  magick_unreferenced(draw_info);

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /*
      These primitives have no vector-path equivalent.
    */
    case PointPrimitive:
    case ColorPrimitive:
    case MattePrimitive:
    case TextPrimitive:
    case ImagePrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /*
    Worst case: every input point yields one path entry plus, per open
    subpath, an OpenCode rewrite and two closing entries (ghostline +
    lineto); 3*i+1 entries (including the EndCode sentinel) bounds this.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points (always keep the subpath's first and
          last point).
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed: rewrite its
      first entry to OpenCode and append a ghostline segment back to p.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /*
    Trim the over-allocation down to the n entries actually written plus
    the EndCode sentinel; may return NULL if the shrink fails.
  */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Release every owned member of the DrawInfo (strings, pattern/mask
    images, dash pattern, gradient stops), invalidate the signature so a
    stale pointer is caught by later asserts, free the structure, and
    return NULL for the caller to re-assign.
  */
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->tile != (Image *) NULL)
    draw_info->tile=DestroyImage(draw_info->tile);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* poison the signature so use-after-free trips the assert above */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  register EdgeInfo
    *edges;

  /*
    Free the point list of the given edge, close the gap in the edge array
    with a memmove, and return the new edge count.
  */
  assert(edge < polygon_info->number_edges);
  edges=polygon_info->edges;
  edges[edge].points=(PointInfo *) RelinquishMagickMemory(edges[edge].points);
  polygon_info->number_edges--;
  if (edge < polygon_info->number_edges)
    (void) memmove(edges+edge,edges+edge+1,(size_t)
      (polygon_info->number_edges-edge)*sizeof(*edges));
  return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register EdgeInfo
    *edge;

  register ssize_t
    i;

  /*
    Free each edge's point list, then the edge array, then the structure;
    returns NULL for the caller to re-assign.
  */
  edge=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    edge->points=(PointInfo *) RelinquishMagickMemory(edge->points);
    edge++;
  }
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Compute, for destination scanline y, the x-interval of 'edge' that maps
    back inside the source image under 'affine'.  The caller treats
    x2 < x1 as "no coverage on this scanline".
  */
  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  /* z is the source column at x=0: columns = sx*x + ry*y + tx */
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      /* tighten interval to where 0 <= source column < columns */
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* negative slope: the two intercepts swap roles */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /*
            sx ~ 0: the whole scanline maps to one source column; if that
            column is outside the image, collapse the interval.
            NOTE(review): this branch collapses to edge->x1 while the rows
            branch below uses edge->x2 — confirm the asymmetry is intended.
          */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  /* z is the source row at x=0: rows = rx*x + sy*y + ty */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /* rx ~ 0 and the constant source row is outside the image */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  /*
    Invert the 2x3 affine transform: the linear 2x2 part is the adjugate
    scaled by the reciprocal determinant, and the translation is the
    negated original translation mapped through the inverted linear part.
    PerceptibleReciprocal() guards against a singular (near-zero
    determinant) matrix.
  */
  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  PointInfo
    extent[4],  /* the four source corners, transformed to dest space */
    min,
    max,
    point;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box: forward-map the source corners and take the
    axis-aligned min/max.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: for each destination scanline in the clipped
    bounding box, inverse-map pixels back into the source and composite.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /* clip the bounding box to the destination image */
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetMagickPixelPacket(image,&zero);
  exception=(&image->exception);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    MagickPixelPacket
      composite,
      pixel;

    PointInfo
      point;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    /* x-interval of this scanline that maps inside the source image */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* inverse-map destination (x,y) to a source coordinate */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolateMagickPixelPacket(source,source_view,
        UndefinedInterpolatePixel,point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      /* composite the interpolated source pixel over the destination */
      SetMagickPixelPacket(image,q,indexes+x_offset,&composite);
      MagickPixelCompositeOver(&pixel,pixel.opacity,&composite,
        composite.opacity,&composite);
      SetPixelPacket(image,&composite,q,indexes+x_offset);
      x_offset++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  double
    maximum_width;

  /*
    Clamp the stroke width to a limit proportional to the largest image
    dimension so absurd widths cannot blow up rendering.
  */
  maximum_width=(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,
    image->rows);
  return(MagickMin((double) draw_info->stroke_width,maximum_width));
}
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info)
{
  double
    mid;  /* half the effective stroke width, in device pixels */

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  /*
    Developer-debug rendering: outline every edge's bounding rectangle
    (red for downward edges, green for upward) plus the overall polygon
    bounds in blue, using unfilled stroked rectangles.
  */
  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* "#0000" = fully transparent fill; rectangles are stroke-only */
  status=QueryColorDatabase("#0000",&clone_info->fill,&image->exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* default 96 DPI unless a density string overrides it */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounds, expanded by mid and clamped to the image.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        /* red (#f00) for down edges, green (#0f0) for up edges */
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorDatabase("#f00",&clone_info->stroke,
            &image->exception);
        else
          status=QueryColorDatabase("#0f0",&clone_info->stroke,
            &image->exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* early exit: a color query or primitive draw failed above */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* overall polygon bounds in blue (#00f) */
  status=QueryColorDatabase("#00f",&clone_info->stroke,&image->exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id)
{
  const char
    *path;

  Image
    *mask_image;

  MagickBooleanType
    status;

  /*
    Look up the named clip path in the image artifacts, render it into a
    clipping mask, and attach that mask to the image.
  */
  path=GetImageArtifact(image,id);
  if (path == (const char *) NULL)
    return(MagickFalse);
  mask_image=DrawClippingMask(image,draw_info,draw_info->clip_mask,path,
    &image->exception);
  if (mask_image == (Image *) NULL)
    return(MagickFalse);
  status=SetImageClipMask(image,mask_image);
  mask_image=DestroyImage(mask_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path: render the MVG clip_path into a fresh image-sized
    canvas (white fill over a transparent background), then extract its
    alpha channel as the mask.  Returns NULL on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL);
  status=SetImageExtent(clip_mask,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  /* detach any existing clip mask so rendering is not itself clipped */
  status=SetImageClipMask(image,(Image *) NULL);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.opacity=(Quantum) TransparentOpacity;
  status=SetImageBackgroundColor(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  /* paint the path opaque white with no stroke */
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  (void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->opacity=OpaqueOpacity;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0);
  clone_info=DestroyDrawInfo(clone_info);
  /* turn the rendered coverage (alpha) into the mask channel */
  status&=SeparateImageChannel(clip_mask,TrueAlphaChannel);
  if (draw_info->compliance != SVGCompliance)
    status&=NegateImage(clip_mask,MagickFalse);
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path: render the MVG mask_path into a fresh image-sized
    canvas (white fill over a transparent background), then extract and
    negate its alpha channel as the composite mask.  Returns NULL on
    failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL);
  status=SetImageExtent(composite_mask,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  /* detach any existing mask so rendering is not itself masked */
  status=SetImageMask(image,(Image *) NULL);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.opacity=(Quantum) TransparentOpacity;
  (void) SetImageBackgroundColor(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  /* paint the path opaque white with no stroke */
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->opacity=OpaqueOpacity;
  status=RenderMVGContent(composite_mask,clone_info,0);
  clone_info=DestroyDrawInfo(clone_info);
  /* turn the rendered coverage (alpha) into the mask channel */
  status&=SeparateImageChannel(composite_mask,TrueAlphaChannel);
  status&=NegateImage(composite_mask,MagickFalse);
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image)
{
  /*
    Stroke the polygon described by primitive_info as a run of dash
    segments, honoring draw_info's dash pattern and dash offset.  Dashes
    are accumulated into the scratch polygon dash_polygon and rendered one
    at a time with DrawStrokePolygon().  n indexes the current dash-pattern
    entry (even entries are dashes, odd entries are gaps); j is the next
    free slot in dash_polygon.
  */
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /*
    Count vertices; the scratch polygon holds at most two points per vertex
    plus slack.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  /*
    Scale the pattern by the affine transform, then consume the dash offset
    so the first emitted dash starts at the right phase of the pattern.
  */
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        /*
          Offset spans this pattern entry entirely; advance to the next.
        */
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        /*
          Offset ends inside this entry; shorten the first dash.
        */
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each polygon edge, emitting alternating dash (even n) and gap
    (odd n) segments along its length.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /*
          Current pattern entry exhausted; advance, wrapping back to the
          start of the pattern at its terminating zero entry.
        */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /*
            Gap segment: restart the scratch polygon at the gap's end.
          */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /*
            Dash segment: append its end point and stroke the dash now.
          */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon);
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /*
      Carry the unconsumed remainder of this edge into the next edge.
    */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      /*
        Flush the final partial dash.
      */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  /*
    Map pixel (x,y) to a gradient stop offset for the gradient type.
  */
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      const SegmentInfo
        *vector;

      double
        denominator,
        distance,
        projection;

      PointInfo
        delta,
        direction;

      /*
        Project (x,y) onto the gradient vector; the projected length is
        the (unnormalized) stop offset.
      */
      vector=(&gradient->gradient_vector);
      direction.x=vector->x2-vector->x1;
      direction.y=vector->y2-vector->y1;
      delta.x=(double) x-vector->x1;
      delta.y=(double) y-vector->y1;
      distance=sqrt(delta.x*delta.x+delta.y*delta.y);
      denominator=sqrt(direction.x*direction.x+direction.y*direction.y)*
        distance;
      projection=delta.x*direction.x+delta.y*direction.y;
      return(PerceptibleReciprocal(denominator)*projection*distance);
    }
    case RadialGradient:
    {
      double
        cosine,
        sine;

      PointInfo
        radial;

      if (gradient->spread == RepeatSpread)
        {
          /*
            Repeat spread: offset is the raw distance from the center.
          */
          radial.x=(double) x-gradient->center.x;
          radial.y=(double) y-gradient->center.y;
          return(sqrt(radial.x*radial.x+radial.y*radial.y));
        }
      /*
        Rotate into the ellipse frame and scale by the radii to obtain an
        elliptical distance from the center.
      */
      cosine=cos(DegreesToRadians(gradient->angle));
      sine=sin(DegreesToRadians(gradient->angle));
      radial.x=(double) (((x-gradient->center.x)*cosine)+
        ((y-gradient->center.y)*sine))*
        PerceptibleReciprocal(gradient->radii.x);
      radial.y=(double) (((x-gradient->center.x)*sine)-
        ((y-gradient->center.y)*cosine))*
        PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(radial.x*radial.x+radial.y*radial.y));
    }
  }
  return(0.0);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.  Each pixel in the bounding
    box is mapped to a stop offset via GetStopColorOffset(), blended
    between the two bracketing color stops per the gradient spread method,
    then composited over the existing pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  /* length of the gradient vector; normalizes linear-gradient offsets */
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    MagickPixelPacket
      composite,
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      i,
      x;

    register PixelPacket
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    /* seed the row offset; refined per-pixel inside the spread cases */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: offsets outside [0,1] clamp to the terminal stops.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* linear blend between the two bracketing stops */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: the gradient ping-pongs, mirroring on every
            other repetition of the unit interval.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          /*
            Repeat spread: the gradient tiles; antialias marks pixels near
            a tile seam, which are blended across the terminal stops to
            soften the discontinuity.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,(double) gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,
                      (double) gradient->radius);
                  else
                    repeat=fmod(offset,(double) gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* blend first and last stops across the seam */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      MagickPixelCompositeOver(&composite,composite.opacity,&pixel,
        pixel.opacity,&pixel);
      SetPixelPacket(image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives; grow the
    primitive-info buffer when the current offset plus pad would exceed it.
    Returns MagickTrue when the buffer is large enough (possibly after a
    resize), MagickFalse when reallocation failed or was refused.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  /* reject overflowing/absurd requests before converting to size_t */
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          /* mark the newly grown tail as unused */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
  /*
    Splay-tree key comparator: order MVG macro names lexicographically.
  */
  return(strcmp((const char *) target,(const char *) source));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.  Every
    `push ... "name"` block found in the MVG stream is recorded in a splay
    tree keyed by name, with the block's body text as the value; the tree
    is returned to the caller (who owns it).
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    GetNextToken(q,&q,extent,token);
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /* n tracks push/pop nesting depth; the body ends where it
               returns to zero */
            n=1;
            for (p=q; *p != '\0'; )
            {
              GetNextToken(p,&p,extent,token);
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  /* candidate body end: just before this "pop" token */
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *end;

  double
    value;

  /*
    A token is a point if it parses as a number: either the parsed value
    is non-zero, or the parser consumed at least one character.
  */
  value=StringToDouble(point,&end);
  if ((fabs(value) < MagickEpsilon) && (end == point))
    return(MagickFalse);
  return(MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Record a single-coordinate (point) primitive at the given location.
  */
  primitive_info->point=point;
  primitive_info->coordinates=1;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
key[2*MaxTextExtent],
keyword[MaxTextExtent],
geometry[MaxTextExtent],
name[MaxTextExtent],
*next_token,
pattern[MaxTextExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PixelPacket
start_color;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryImageException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (image->matte == MagickFalse)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel);
if (status == MagickFalse)
return(MagickFalse);
}
primitive=(char *) NULL;
if (*draw_info->primitive != '@')
primitive=AcquireString(draw_info->primitive);
else
if ((strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-'))
primitive=FileToString(draw_info->primitive+1,~0UL,&image->exception);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=(&image->exception);
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MaxTextExtent;
(void) QueryColorDatabase("#000000",&start_color,&image->exception);
cursor=0.0;
defsDepth=0;
symbolDepth=0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
GetNextToken(q,&q,MaxTextExtent,keyword);
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&graphic_context[n]->border_color,
&image->exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,&image->exception);
if (draw_info->compliance != SVGCompliance)
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern);
else
{
status&=QueryColorDatabase(token,&graphic_context[n]->fill,
&image->exception);
if (graphic_context[n]->fill_opacity != OpaqueOpacity)
graphic_context[n]->fill.opacity=ClampToQuantum(
graphic_context[n]->fill_opacity);
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
graphic_context[n]->fill_opacity=(QuantumRange-
graphic_context[n]->fill_opacity)*(1.0-opacity);
if (graphic_context[n]->fill.opacity != TransparentOpacity)
graphic_context[n]->fill.opacity=(Quantum)
graphic_context[n]->fill_opacity;
else
graphic_context[n]->fill.opacity=ClampToQuantum(QuantumRange*
opacity);
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,&image->exception);
if (draw_info->compliance != SVGCompliance)
status=SetImageMask(image,graphic_context[n]->composite_mask);
}
break;
}
if (LocaleCompare("matte",keyword) == 0)
{
primitive_type=MattePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
graphic_context[n]->fill_opacity=(QuantumRange-
graphic_context[n]->fill_opacity)*(1.0-opacity);
graphic_context[n]->stroke_opacity=(QuantumRange-
graphic_context[n]->stroke_opacity)*(1.0-opacity);
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),DrawError,
"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(draw_info->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageClipMask(image,(Image *) NULL);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MaxTextExtent],
name[MaxTextExtent],
type[MaxTextExtent];
SegmentInfo
segment;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MaxTextExtent);
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MaxTextExtent);
GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (LocaleCompare(type,"radial") == 0)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("mask",token) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
RectangleInfo
bounds;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MaxTextExtent);
GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
GradientType
type;
PixelPacket
stop_color;
GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&stop_color,&image->exception);
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,&start_color,&stop_color);
start_color=stop_color;
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern);
else
{
status&=QueryColorDatabase(token,&graphic_context[n]->stroke,
&image->exception);
if (graphic_context[n]->stroke_opacity != OpaqueOpacity)
graphic_context[n]->stroke.opacity=ClampToQuantum(
graphic_context[n]->stroke_opacity);
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*p;
p=q;
GetNextToken(p,&p,extent,token);
if (*token == ',')
GetNextToken(p,&p,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
GetNextToken(p,&p,extent,token);
if (*token == ',')
GetNextToken(p,&p,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
graphic_context[n]->stroke_opacity=(QuantumRange-
graphic_context[n]->stroke_opacity)*(1.0-opacity);
if (graphic_context[n]->stroke.opacity != TransparentOpacity)
graphic_context[n]->stroke.opacity=(Quantum)
graphic_context[n]->stroke_opacity;
else
graphic_context[n]->stroke.opacity=ClampToQuantum(QuantumRange*
opacity);
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&graphic_context[n]->undercolor,
&image->exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
DrawError,"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
break;
}
default:
break;
}
if (coordinates > MaxBezierCoordinates)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
DrawError,"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(image,&mvg_info,token);
if (coordinates == 0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case ColorPrimitive:
case MattePrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
(void) ConcatenateString(&clone_info->text," ");
status&=GetTypeMetrics(image,clone_info,&metrics);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (primitive_info == (PrimitiveInfo *) NULL)
break;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (draw_info->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryImageException(DrawError,
"NonconformingDrawingPrimitiveDefinition",keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
{
  /*
    Public entry point: render the MVG primitives carried by draw_info onto
    image.  Delegates to RenderMVGContent at recursion depth 0 (the depth
    argument guards nested "use"/pattern expansion).
  */
  MagickBooleanType
    status;

  status=RenderMVGContent(image,draw_info,0);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the rendered pattern image (replaced by this method).
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern)
{
  char
    key[MaxTextExtent];

  const char
    *gradient_type,
    *mvg,
    *size;

  DrawInfo
    *pattern_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  /*
    Render the named pattern: its MVG definition and geometry were stashed
    earlier as image artifacts keyed by "<name>" and "<name>-geometry".
    Returns MagickFalse when either artifact is absent; otherwise replaces
    *pattern with a freshly rendered pattern image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(key,MaxTextExtent,"%s",name);
  mvg=GetImageArtifact(image,key);
  if (mvg == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
  size=GetImageArtifact(image,key);
  if (size == (const char *) NULL)
    return(MagickFalse);
  /*
    Discard any previous pattern image and allocate a canvas of the recorded
    geometry, with a "#00000000" background color.
  */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(size);
  *pattern=AcquireImage(image_info);
  image_info=DestroyImageInfo(image_info);
  (void) QueryColorDatabase("#00000000",&(*pattern)->background_color,
    &image->exception);
  (void) SetImageBackgroundColor(*pattern);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,size);
  /*
    Draw with a clone of draw_info whose fill/stroke pattern lists are reset,
    honoring an optional "<name>-type" gradient-type artifact.
  */
  pattern_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  pattern_info->fill_pattern=NewImageList();
  pattern_info->stroke_pattern=NewImageList();
  (void) FormatLocaleString(key,MaxTextExtent,"%s-type",name);
  gradient_type=GetImageArtifact(image,key);
  if (gradient_type != (const char *) NULL)
    pattern_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,gradient_type);
  (void) CloneString(&pattern_info->primitive,mvg);
  status=RenderMVGContent(*pattern,pattern_info,0);
  pattern_info=DestroyDrawInfo(pattern_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/
/*
  Release every per-thread PolygonInfo created by AcquirePolygonThreadSet()
  along with the array that holds them; always returns NULL.
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    n;

  assert(polygon_info != (PolygonInfo **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (polygon_info[n] == (PolygonInfo *) NULL)
      continue;
    polygon_info[n]=DestroyPolygonInfo(polygon_info[n]);
  }
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
/*
  Build one PolygonInfo per worker thread (up to the thread resource limit)
  from the primitive path so each OpenMP thread in DrawPolygonPrimitive()
  can mutate its own edge list.  Returns NULL on failure.

  Fix: the original leaked path_info when ConvertPathToPolygon() failed
  part-way through the loop — only polygon_info was destroyed on that path.
*/
static PolygonInfo **AcquirePolygonThreadSet(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info)
{
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    return((PolygonInfo **) NULL);
  /* zero the slot array so a partial failure can be cleaned up safely */
  (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
  path_info=ConvertPrimitiveToPath(draw_info,primitive_info);
  if (path_info == (PathInfo *) NULL)
    return(DestroyPolygonThreadSet(polygon_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    polygon_info[i]=ConvertPathToPolygon(path_info);
    if (polygon_info[i] == (PolygonInfo *) NULL)
      {
        /* release the intermediate path too; it is no longer needed */
        path_info=(PathInfo *) RelinquishMagickMemory(path_info);
        return(DestroyPolygonThreadSet(polygon_info));
      }
  }
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  return(polygon_info);
}
/*
  GetOpacityPixel() returns the fill opacity for point (x,y) against the
  polygon edge list and stores the stroke opacity in *stroke_opacity.  mid
  is half the effective stroke width; fill selects whether a fill opacity
  is computed at all; fill_rule chooses even-odd vs. non-zero winding.

  Fix: removed an unreachable `if (distance > 1.0) continue;` — the same
  condition already triggers `continue` a few lines earlier.
*/
static double GetOpacityPixel(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_opacity)
{
  double
    alpha,
    beta,
    distance,
    subpath_opacity;

  PointInfo
    delta;

  register EdgeInfo
    *p;

  register const PointInfo
    *q;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_opacity=0.0;
  subpath_opacity=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* edges are ordered; once one starts below the scanline, stop */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* scanline has passed this edge entirely; drop it */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* nearest to segment start */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* nearest to segment end */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* nearest to segment interior: perpendicular distance */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_opacity < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_opacity=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_opacity < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_opacity=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_opacity >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_opacity=1.0;
          continue;
        }
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_opacity < (alpha*alpha))
        subpath_opacity=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_opacity >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* cross product decides which side of the edge (x,y) falls on */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule: inside when the winding count is odd */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_opacity);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  CacheView
    *image_view;

  double
    mid;

  ExceptionInfo
    *exception;

  MagickBooleanType
    fill,
    status;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(draw_info,primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  /* debugging aid: flip `if (0)` to render the polygon's bounding boxes */
  DisableMSCWarning(4127)
  if (0)
    {
      status=DrawBoundingRectangles(image,draw_info,polygon_info[0]);
      if (status == MagickFalse)
        {
          polygon_info=DestroyPolygonThreadSet(polygon_info);
          return(status);
        }
    }
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid = half the stroke width in device space */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  /* union of all edge bounding boxes, expanded by the stroke half-width */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp the render region to the image canvas */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for ( ; x <= stop_x; x++)
        {
          /* only the single pixel nearest the primitive point is filled */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            (void) GetFillColor(draw_info,x-start_x,y-start_y,q);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      fill_opacity,
      stroke_opacity;

    PixelPacket
      fill_color,
      stroke_color;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      /*
        Fill and/or stroke.
      */
      /* polygon_info[id]: each OpenMP thread mutates its own edge list */
      fill_opacity=GetOpacityPixel(polygon_info[id],mid,fill,
        draw_info->fill_rule,x,y,&stroke_opacity);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* hard threshold when antialiasing is disabled */
          fill_opacity=fill_opacity > 0.25 ? 1.0 : 0.0;
          stroke_opacity=stroke_opacity > 0.25 ? 1.0 : 0.0;
        }
      (void) GetFillColor(draw_info,x-start_x,y-start_y,&fill_color);
      fill_opacity=(double) (QuantumRange-fill_opacity*(QuantumRange-
        fill_color.opacity));
      MagickCompositeOver(&fill_color,(MagickRealType) fill_opacity,q,
        (MagickRealType) q->opacity,q);
      (void) GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color);
      stroke_opacity=(double) (QuantumRange-stroke_opacity*(QuantumRange-
        stroke_color.opacity));
      MagickCompositeOver(&stroke_color,(MagickRealType) stroke_opacity,q,
        (MagickRealType) q->opacity,q);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/
/*
  Clamp a floating-point coordinate into the range representable by ssize_t
  so that later casts cannot overflow.  A NaN input falls through both
  comparisons and is returned unchanged, matching the original behavior.
*/
static inline double ConstrainCoordinate(double x)
{
  const double
    limit = (double) SSIZE_MAX;

  return(x < -limit ? -limit : x > limit ? limit : x);
}
/*
  LogPrimitiveInfo() writes a human-readable trace of a primitive (type,
  method, and each coordinate of its subpaths) to the draw event log.
  Logging only; the image is not touched.

  Fix: removed a duplicated `point=primitive_info[i].point;` assignment —
  the loop assigned the identical value twice per iteration.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    q,
    point;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case MattePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "MattePrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Path-style primitive: log each coordinate, flagging duplicates and
    whether each subpath ends open or closed.
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* start of a new subpath: remember its first point in p */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    begin open (%.20g)",(double) coordinates);
        p=point;
      }
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "      %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "      %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* subpath exhausted: closed if the last point returned to the first */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "  begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "    affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  exception=(&image->exception);
  status=MagickTrue;
  /* promote a gray image to sRGB if fill or stroke is non-gray */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelGray(&draw_info->stroke) == MagickFalse)))
    status=SetImageColorspace(image,sRGBColorspace);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageClipMask(image,draw_info->clipping_mask);
      status&=SetImageMask(image,draw_info->composite_mask);
    }
  /* clamp first so the double -> ssize_t casts below cannot overflow */
  x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5));
  y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5));
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case ColorPrimitive:
    {
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelPacket
            *q;

          /* recolor the single pixel at (x,y) */
          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            break;
          (void) GetFillColor(draw_info,x,y,q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelPacket
            target;

          /* recolor every pixel matching the color at (x,y) */
          status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) == MagickFalse)
                {
                  q++;
                  continue;
                }
              (void) GetFillColor(draw_info,x,y,q);
              q++;
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          MagickPixelPacket
            target;

          /* flood fill from (x,y); FillToBorder stops at the border color */
          (void) GetOneVirtualMagickPixel(image,x,y,&target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(MagickRealType) draw_info->border_color.red;
              target.green=(MagickRealType) draw_info->border_color.green;
              target.blue=(MagickRealType) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,DefaultChannels,draw_info,&target,x,
            y,primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          /* recolor every pixel in the image */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              (void) GetFillColor(draw_info,x,y,q);
              q++;
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case MattePrimitive:
    {
      /* same methods as ColorPrimitive but only the opacity channel is set */
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelPacket
            pixel;

          PixelPacket
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            break;
          (void) GetFillColor(draw_info,x,y,&pixel);
          SetPixelOpacity(q,pixel.opacity);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelPacket
            pixel,
            target;

          status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) == MagickFalse)
                {
                  q++;
                  continue;
                }
              (void) GetFillColor(draw_info,x,y,&pixel);
              SetPixelOpacity(q,pixel.opacity);
              q++;
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          MagickPixelPacket
            target;

          (void) GetOneVirtualMagickPixel(image,x,y,&target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(MagickRealType) draw_info->border_color.red;
              target.green=(MagickRealType) draw_info->border_color.green;
              target.blue=(MagickRealType) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,OpacityChannel,draw_info,&target,x,
            y,primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelPacket
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              (void) GetFillColor(draw_info,x,y,&pixel);
              SetPixelOpacity(q,pixel.opacity);
              q++;
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MaxTextExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      /* text holds either an inline data: URI or a filename to read */
      clone_info=AcquireImageInfo();
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          &image->exception);
      else
        {
          (void) CopyMagickString(clone_info->filename,primitive_info->text,
            MaxTextExtent);
          composite_images=ReadImage(clone_info,&image->exception);
        }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=0;
          break;
        }
      /* only the first image of a multi-frame read is composited */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          char
            geometry[MaxTextExtent];

          /*
            Resize image.
          */
          (void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g!",
            primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,geometry);
        }
      if (composite_image->matte == MagickFalse)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel);
      if (draw_info->opacity != OpaqueOpacity)
        (void) SetImageOpacity(composite_image,draw_info->opacity);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MaxTextExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,
        &image->exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /* plain over-composite can skip the affine resampling path */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        (void) DrawAffineImage(image,composite_image,&affine);
      else
        (void) CompositeImage(image,draw_info->compose,composite_image,
          geometry.x,geometry.y);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelPacket
        fill_color;

      PixelPacket
        *q;

      /* off-canvas points are silently ignored */
      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      (void) GetFillColor(draw_info,x,y,&fill_color);
      MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
        (MagickRealType) q->opacity,q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MaxTextExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      /* delegate text rendering to AnnotateImage at the primitive's point */
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.opacity != (Quantum) TransparentOpacity))
        {
          /*
            Draw dash polygon.
          */
          /* fill with a stroke-less clone, then dash the stroke on top */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.opacity=(Quantum) TransparentOpacity;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
          clone_info=DestroyDrawInfo(clone_info);
          (void) DrawDashPolygon(draw_info,primitive_info,image);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.opacity != (Quantum) TransparentOpacity) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /* round caps/joins or multi-subpath: one-pass polygon render */
              (void) DrawPolygonPrimitive(image,draw_info,primitive_info);
              break;
            }
          /* two-pass: fill without stroke, then trace the stroke outline */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.opacity=(Quantum) TransparentOpacity;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawStrokePolygon(image,draw_info,primitive_info);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageClipMask(image,(Image *) NULL);
      status&=SetImageMask(image,(Image *) NULL);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"  end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
/*
  Paint a round line cap at the endpoint described by primitive_info by
  synthesizing a degenerate 4-vertex polygon nudged by 2*MagickEpsilon and
  handing it to the polygon renderer, which expands it by the stroke width.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  i=0;
  while (i < 4)
    linecap[i++]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  /* fifth entry only terminates the list; its other fields are unused */
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    Render the stroke by filling its traced outline: the clone paints with
    the stroke color/pattern as its fill and carries no stroke of its own.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,&clone_info->stroke_pattern->exception);
  clone_info->stroke.opacity=(Quantum) TransparentOpacity;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /* one iteration per subpath; p advances by the subpath's coordinates */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        /* stroke_polygon is NULL here; Relinquish on NULL is a no-op */
        status=0;
        stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    /* q points at the subpath's last coordinate for the closing cap */
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p);
        status&=DrawRoundLinecap(image,draw_info,q);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
/*
  GetAffineMatrix() resets *affine_matrix to the identity transform: all
  elements zeroed, then the diagonal scale terms set to one.
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* memset also clears rx, ry, tx, ty and any padding */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info..
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.  draw_info is zeroed and then populated with
    defaults, which are then overridden first by fields of image_info (via a
    clone) and finally by any matching image options ("fill", "stroke",
    "weight", ...) attached to it.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* default paint: opaque black fill, fully transparent stroke */
  (void) QueryColorDatabase("#000F",&draw_info->fill,exception);
  (void) QueryColorDatabase("#FFF0",&draw_info->stroke,exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->opacity=OpaqueOpacity;
  draw_info->fill_opacity=OpaqueOpacity;
  draw_info->stroke_opacity=OpaqueOpacity;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  /* pointsize falls back to 12.0 when image_info does not supply one */
  draw_info->pointsize=12.0;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->undercolor.opacity=(Quantum) TransparentOpacity;
  draw_info->border_color=clone_info->border_color;
  draw_info->compose=OverCompositeOp;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Apply image options that override the defaults above.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->fill,exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->stroke,exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->undercolor,exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* "weight" accepts either a symbolic name or a raw numeric value */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the number of combinations of (n,k), i.e. the
% binomial coefficient n!/(k!*(n-k)!).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n: the total number of elements.
%
% o k: the number of elements selected.
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    result;

  register ssize_t
    i;

  /*
    Binomial coefficient C(n,k) = n!/(k!*(n-k)!), computed as the product
    (k+1)*(k+2)*...*n divided by (n-k)! so intermediate values stay small.
  */
  result=1.0;
  i=k+1;
  while (i <= n)
    result*=i++;
  i=1;
  while (i <= (n-k))
    result/=i++;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  /*
    Express the arc as an ellipse: the center is the midpoint of the
    start/end chord and the radii are the half-extents from that center.
  */
  center.x=0.5*(start.x+end.x);
  center.y=0.5*(start.y+end.y);
  radius.x=fabs(start.x-center.x);
  radius.y=fabs(start.y-center.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  /*
    Trace an elliptical arc from start to end (SVG 'A'/'a' semantics:
    radii, x-axis rotation, large-arc and sweep flags), approximating the
    arc with one cubic Bezier per quarter-turn segment.
  */
  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* degenerate chord: emit a single point */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  /* zero radius: degenerate to a straight line */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /* half-chord vector rotated into the ellipse's own frame */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /* scale radii up when they are too small to span the chord */
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /* endpoints normalized to the unit circle in the rotated frame */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      /* flag combination selects which of the two candidate centers */
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /* alpha = start angle, theta = signed sweep extent */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* one Bezier segment per (at most) quarter turn */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  p=primitive_info;
  status=MagickTrue;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    /* gamma: control-point handle length for this segment */
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* segment starts where the previous one ended */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    /* map control points back from the unit circle to image space */
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    /* pin the last segment exactly on the requested endpoint */
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    status&=TraceBezier(mvg_info,4);
    if (status == MagickFalse)
      break;
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  /* rewind to the arc's first primitive and stamp the aggregate metadata */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(status == 0 ? MagickFalse : MagickTrue);
}
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Evaluate a Bezier curve whose control points occupy the first
    number_coordinates slots at mvg_info->offset, replacing them with a
    polyline of sampled points (Bernstein-polynomial evaluation).

    Fix: the scratch buffers (points, coefficients) were leaked on the
    early-return TracePoint failure paths; they are now released before
    every return.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /* sample density scales with the curve's bounding extent */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  coefficients=(double *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points: binomial coefficients once, then one weighted
    sum of the control points per sample.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      /* advance Bernstein term: t/(1-t) ratio between successive bases */
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /* terminate exactly on the final control point */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    radius;

  PointInfo
    degrees,
    radii;

  /*
    A circle is an ellipse centered on start whose x and y radii both equal
    the distance from start (the center) to end (a perimeter point), swept
    through the full 360 degrees.
  */
  radius=hypot((double) (end.x-start.x),(double) (end.y-start.y));
  radii.x=(double) radius;
  radii.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,radii,degrees));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys: sample the parametric form
    between arc.x and arc.y degrees, with a step chosen from the larger
    radius so bigger ellipses get more segments.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* a zero radius yields an empty (zero-coordinate) primitive */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  /* normalize the end angle so the sweep is non-negative */
  y=arc.y;
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  /* guard the primitive buffer against an oversized segment count */
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if ((coordinates > (double) SSIZE_MAX) ||
      (coordinates > (double) GetMaxMemoryRequest()))
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* land exactly on the end angle regardless of step rounding */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* mark as closed when first and last samples coincide (full sweep) */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  /*
    Trace the segment start->end as two point primitives; when the
    endpoints coincide, collapse it to a single point primitive.
  */
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  if ((fabs(start.x-end.x) >= MagickEpsilon) ||
      (fabs(start.y-end.y) >= MagickEpsilon))
    {
      if (TracePoint(primitive_info+1,end) == MagickFalse)
        return(MagickFalse);
      (primitive_info+1)->primitive=primitive_info->primitive;
      primitive_info->coordinates=2;
      primitive_info->closed_subpath=MagickFalse;
      return(MagickTrue);
    }
  primitive_info->primitive=PointPrimitive;
  primitive_info->coordinates=1;
  return(MagickTrue);
}
static size_t TracePath(Image *image,MVGInfo *mvg_info,const char *path)
{
  char
    *next_token,
    token[MaxTextExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickStatusType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  /*
    Parse an SVG-style path string (M, L, H, V, C, S, Q, T, A, Z commands;
    uppercase = absolute, lowercase = relative) into point primitives
    appended at mvg_info->offset.  Returns the total number of coordinates
    traced, or 0 on a parse/allocation failure.
  */
  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx,ry rotation large-arc-flag sweep-flag x,y.
        */
        do
        {
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          status&=TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve: two control points plus the endpoint.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to: only the x coordinate changes.
        */
        do
        {
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: close out any pending subpath, then start a new one.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          /* first pair is the subpath origin Z/z returns to */
          if (i == 0)
            start=point;
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve: one control point plus the endpoint.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: the first control point is the
          reflection of the previous curve's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* no preceding C/c/S/s: reflection degenerates to current point */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve: the control point is the
          reflection of the previous curve's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          /* no preceding Q/q/T/t: reflection degenerates to current point */
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to: only the y coordinate changes.
        */
        do
        {
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: return to the subpath's start point and seal it.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(0);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* unknown path command */
        ThrowPointExpectedException(image,token);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  /* flush the final (possibly unclosed) subpath */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    /* multiple closed subpaths: fill with border method for holes */
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    corners[5];

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Trace a rectangle as a closed 5-point polyline: start, the two mixed
    corners and end, finishing back on start.
  */
  corners[0]=start;
  corners[1].x=start.x;
  corners[1].y=end.y;
  corners[2]=end;
  corners[3].x=end.x;
  corners[3].y=start.y;
  corners[4]=start;
  p=primitive_info;
  for (i=0; i < 5; i++)
  {
    if (TracePoint(p,corners[i]) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  /*
    Trace a rectangle with rounded corners by stitching together four
    quarter-ellipse corner arcs (traced clockwise starting at the
    top-right corner) and closing back on the first point.
  */
  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  /* degenerate rectangle: emit an empty primitive */
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* clamp corner radii so opposite arcs cannot overlap */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* top-right corner: 270..360 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-right corner: 0..90 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-left corner: 90..180 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* top-left corner: 180..270 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  /* close the outline back onto its first point */
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /* rewind and stamp aggregate metadata on the first primitive */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  /*
    Extend both endpoints of an open stroke outward along the stroke's
    direction by 'offset' pixels to render a square linecap.  The direction
    at each end is taken from the nearest vertex that is measurably distinct
    from the endpoint.

    Fix: use PerceptibleReciprocal() instead of dividing by 'distance'
    directly, which divided by zero when every vertex coincided with the
    endpoint (dx == dy == 0).
  */
  dx=0.0;
  dy=0.0;
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)*PerceptibleReciprocal(distance));
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)*PerceptibleReciprocal(distance));
  /* repeat for the far endpoint, scanning backwards */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)*PerceptibleReciprocal(distance));
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)*PerceptibleReciprocal(distance));
  return(MagickTrue);
}
/*
  TraceStrokePolygon() computes the outline polygon of primitive_info stroked
  with the pen described by draw_info (stroke width, line join, line cap).
  Two point lists are accumulated -- path_p and path_q, one for each side of
  the stroke -- and then concatenated (the q side reversed) into one closed
  polygon.  Returns a newly allocated PrimitiveInfo array the caller must
  free, or NULL on a memory-allocation failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
/*
  Grow path_p/path_q by (pad) entries when needed; on overflow or allocation
  failure release all working buffers and return NULL from the enclosing
  function.  NOTE(review): only index q is compared against max_strokes, not
  p -- presumably p never outruns q by more than the pad; confirm.
*/
#define CheckPathExtent(pad) \
  if ((ssize_t) (q+(pad)) >= (ssize_t) max_strokes) \
    { \
      if (~max_strokes < (pad)) \
        { \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        } \
      else \
        { \
          max_strokes+=(pad); \
          path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \
            sizeof(*path_p)); \
          path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \
            sizeof(*path_q)); \
        } \
      if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
        { \
          if (path_p != (PointInfo *) NULL) \
            path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          if (path_q != (PointInfo *) NULL) \
            path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
          polygon_primitive=(PrimitiveInfo *) \
            RelinquishMagickMemory(polygon_primitive); \
          return((PrimitiveInfo *) NULL); \
        } \
    }

  /* paired values for the previous (p) and current (q) line segment */
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      /* repeat the second vertex so the closing join is traced too */
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /* near-vertical/horizontal segments get clamped pseudo-infinite slopes */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid is half the effective stroke width under the current affine */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* initial stroke boxes on either side of the first segment */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  /* NOTE(review): the counters look swapped here (path_q with p++, path_p
     with q++); both are 0 at this point so the effect is identical -- each
     path receives its first entry and both counters become 1. */
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    if (dot_product < 0.25)
      continue;  /* skip segments shorter than half a pixel */
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    /* offset the current segment to both sides of the pen */
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_[pq][4] is the intersection of the two offset edges */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    /* cross product sign tells which side of the turn hosts the join */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              /* miter exceeds the limit: fall back to a bevel */
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /* approximate the round join on the q side with an arc */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      /* turn in the opposite direction: mirror of the cases above */
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          /* approximate the round join on the p side with an arc */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* the current segment becomes the previous one for the next join */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      /* append the q side in reverse so the outline is one closed loop */
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
GB_binop__bor_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bor_uint32
// A.*B function (eWiseMult): GB_AemultB__bor_uint32
// A*D function (colscale): GB_AxD__bor_uint32
// D*A function (rowscale): GB_DxB__bor_uint32
// C+=B function (dense accum): GB_Cdense_accumB__bor_uint32
// C+=b function (dense accum): GB_Cdense_accumb__bor_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bor_uint32
// C=scalar+B GB_bind1st__bor_uint32
// C=scalar+B' GB_bind1st_tran__bor_uint32
// C=A+scalar GB_bind2nd__bor_uint32
// C=A'+scalar GB_bind2nd_tran__bor_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x) | (y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_UINT32 || GxB_NO_BOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.  The numeric work
// is done by the shared template, specialized here for z = x | y on uint32_t.
// Returns GrB_NO_VALUE when this operator/type pairing is compiled out
// (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_Cdense_ewise3_noaccum__bor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the BOR
// accumulator.  The slice arrays partition B's entries across ntasks tasks;
// the actual loop lives in the included template.
GrB_Info GB_Cdense_accumB__bor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix with the BOR accumulator.
// p_bwork points at the scalar, passed as untyped GB_void and cast here.
GrB_Info GB_Cdense_accumb__bor_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (generated boilerplate)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, using BOR as the
// multiply operator.  Cx is the typed view of C's value array; the traversal
// is in the included colscale template.
GrB_Info GB_AxD__bor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, using BOR as the
// multiply operator.  Mirrors GB_AxD above but via the rowscale template.
GrB_Info GB_DxB__bor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with z = x | y.  The slice pointer triples
// are filled in by the add template and released by GB_FREE_ALL (defined just
// above in this file).
GrB_Info GB_AaddB__bor_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with z = x | y, on the intersection of
// the patterns of A and B.  Parallel structure matches GB_AaddB above.
GrB_Info GB_AemultB__bor_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = x | bij elementwise, with the scalar bound as
// the first operand.  Cx and Bx may be aliased.  Bb is an optional bitmap:
// entries whose bitmap bit is clear are left untouched.
GrB_Info GB_bind1st__bor_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped input/output arrays, plus the bound scalar
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the operator only where the bitmap marks an entry present
        if (GBB (Bb, k))
        {
            uint32_t bij = Bx [k] ;
            Cx [k] = (x) | (bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = aij | y elementwise, with the scalar bound as
// the second operand.  Cx and Ax may be aliased.  Ab is an optional bitmap:
// entries whose bitmap bit is clear are left untouched.
GrB_Info GB_bind2nd__bor_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped input/output arrays, plus the bound scalar
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the operator only where the bitmap marks an entry present
        if (GBB (Ab, k))
        {
            uint32_t aij = Ax [k] ;
            Cx [k] = (aij) | (y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x) | (aij) ; \
}
// C = op (x, A'): transpose A and apply z = x | aij, with the scalar bound as
// the first operand.  GB_CAST_OP (defined just above) supplies the per-entry
// action to the transpose template.
GrB_Info GB_bind1st_tran__bor_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of this translation unit
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij) | (y) ; \
}
// C = op (A', y): transpose A and apply z = aij | y, with the scalar bound as
// the second operand.  Uses the GB_CAST_OP macro defined just above via the
// shared transpose template.
GrB_Info GB_bind2nd_tran__bor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ssh_ng_fmt_plug.c | /* Fast cracker for SSH RSA / DSA key files. Hacked together during October
* of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* Support for cracking new openssh key format (bcrypt pbkdf) was added by
* m3g9tr0n (Spiros Fraganastasis) and Dhiru Kholia in September of 2014. This
* is dedicated to Raquel :-)
*
* Ideas borrowed from SSH2 protocol library, http://pypi.python.org/pypi/ssh
* Copyright (C) 2011 Jeff Forcier <jeff@bitprophet.org>
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#include "arch.h"
#if !AC_BUILT || HAVE_BIO_NEW
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sshng;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sshng);
#else
#include <string.h>
#include "aes.h"
#include <openssl/des.h>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 512 // Tuned K8-dual HT
#endif
#endif
#include "jumbo.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "stdint.h"
#include "md5.h"
#include "memdbg.h"
#include "asn1.h"
#define FORMAT_LABEL "SSH-ng"
#define FORMAT_NAME ""
#define FORMAT_TAG "$sshng$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "RSA/DSA/EC/OPENSSH (SSH private keys) 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1001
#define PLAINTEXT_LENGTH 32 // XXX
#define BINARY_SIZE 0
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
// openssl asn1parse -in test_dsa.key; openssl asn1parse -in test_rsa.key
#define SAFETY_FACTOR 16 // enough to verify the initial ASN.1 structure (SEQUENCE, INTEGER, Big INTEGER) of RSA, and DSA keys?
#define N 8192
static struct fmt_tests sshng_tests[] = {
{"$sshng$1$16$570F498F6FF732775EE38648130F600D$1200$1777f12047d4ebab06d052d52946e5e0e73b41d5077b20e1ffe1c97ef9459b8c6844fecc24fdf63314c8889398fa140026339c85336278600e299c0f4c236648ca684f0c122e66d3e860e19eab8b46a564eb101def1c6a38f2f1800040c6b59a66e7b86e145e180f8a126e46544be1e17dd32e4e72f735c9e6b0ca4bbbb32ccf34ba0a7827858b0be32f9e53f13466e2ac78c3fecdf2a51cd7871286a3a91f9c71ae9e857a74bcc06071af6f60d827f7e13ccf6c1be722246c0796f509744c2b1b1452315ea6f86a1c8765d1f0c1d795349b4ea1ba229318b392fe505292cd0c6b4e3e9b2acc13b96943d92fa5635e05b7795989906274b0fb1894102d07facdd8f2122299960e1490823d62bbd5bf6d6c92ed26e68cc2edc93fbffec557a5d187fffe085ded9408ac63293851a684ca10d6e9a4ee9b5c552c827caee1f1c41870fe2d0e79bc4a0b85478fa82a58f947d345122c8ac7c80ba2ae8452b093dda70e2a4329fce70af9cf98e19477a622083664d1e62393a01b20371fc5be9390059f1c4af75d5448a2fbe1aaa46701c696afec927c67d15c046036531d9252faa08bbf9ea0e019ea574e6af94edd7ec17c83c0f87e34c7456e19bc53b2de04dafa83267694c1f61d038e0fc5f8f1b8ce573da470e6db6d38c0e8f7141ad9e9609ea408e3823271e987766039d484bc88f23f2f2a1175636ece950c7d82f43726287fef37da945ec6ad6adc04cb59f66087f68a3e84e8cc39c578bcbce3aaf67f1325d3d20dbd5872cc88ab72fc0bda05bf969eca08f8cafb306424a1597ba5d612e155b4723c2c1bee9a8e3d195be3b798ea417008a2340a919e23ac899ea4dbc4ef05af2cf6b12293eeb293584b37d3f8465e36a62d65b21f68725603e11dc14acf4e3855e25980387a34a34919fdd49844ed888e37199bb26df1bbbc303e895615fcbb0aa9ddc8a2aa685da942a1e68dc3a355d27236f74220d404d25e0ac64ae9203bb04296b4d67481a4f516fd22e47092073c9c44fa098670d736c5c509e55d6b40d3bf346ea5bb0007e32e9d8290c2633621fd84c2f5f428a5649ff3a16d00fec21381543202f2ee12078ddea8a371935f2ffa15aafa644e111a29c1c4703bf8e9cf1397356e296c5484558b96639b9cf3703aabff0cf42864dab91b1e09c6439159bc95374da7a5d416402286390e76cb766cd94e7a002596e8862b8d7e46c1fc6f7bdd0b93c73d2dc3cf58ea31bc549086209f450bb7460d5e9ba0d0f7b80337651f45bf83bef1783c3a15631c82428bfe167dc0692402d7f15144fff01ad8596970439ce8a2df0107c85a23ef93edd19f62de499ab58ada581886494c3e52dd5ec53c191f6d62729729a2
52c2c0d8024950d1637cfd7c61a4fe64ce41cde76fe00fa2607af66a44d3b4b8836820f40c03669f08b4e986f4d03c09e3c026a910f83be623d7f68ff80d81662f020f433f7a896e10134a278cd9a8517d3bcd77c5287f7d41bc52d2f8db79b5f8f9ed6d6f45a482b13cb91ecdef43ebe38f5ad71836185ae6faf1dd11c50cc1759e4834fcab2b3523d4224a32d2eaba224a2c950dac7524afc74f02f17b511f3b22d577a6928d40909bed64f6ed27096dff591a8fbee3f32733fd2b36c0c4708a5224f165af000d93832e211ae52465f680e7a4fd66bb5eb210c4402eb58f6ebfde", "strongpassword"},
{"$sshng$0$8$DAA422E8A5A8EFB7$608$fa7b2c1c699697dd487261a213a0dd088a86bc03f4e2db8b87ad302e3581bdd8ed17d0a3ced3e7179ef17beea9064ee862017f472de293d655f6b1cd7115e27c328cf5caf1b5896952590cd82d123fcf6c5da3b43f5435c829ebb595300c828e04d57c7ade57efe006305b32fe79afd0d14cadba681b4dc3a69b25a1e71ddbd353465217c311d11721f1cba05d1226ff0e7d261156f0837753bcaaddfec383591f61470a4318cf679046d43490a1eef33014a90865917ccaa16f986724b8ee421d990327a46410362b4992406af41a88e3c5e5bbb7707ba08517e7ac8295ad0b934c38968f05fd372f1ee29e24eddcbbacba5b3e1b7150e51ba4e17b4f54319630e2d5372adc46e4de437f64b3d11670eb25fc94c7e9bd0579806bbf16c6cfe529a4bc0d3918ca4777f8418e789163660d9bbe0aa297857ee4922dffe310e6967fba2ee2e06707d9bbd9c8601bad7ccfdcb8a948074de511be7d588b7b71d4b5f0b1e19020b54efc4d626b2e4d85c0a40682517128b9ecc29f882996f4f6b655bb1986e293cb5271fe98c61d8b2e6e8338fee42f22674fc8b2da475663ba19644e7de76927cd9e333b533ad7617cc7a9f19dc7c00c240ed92c2fb1aaf6495bd16ab9fae4650567ad8b175d02f9e6a9737362168035670017fd9ad87cf4e916f47baa5efe0d04939295fba608f83fa811b946d12afe77836dc6d0d398824a355926ce5848dace776c7a7ab7109be495894bc98a2cf04107368d5d8777a1d0ef19782ebb1527b564ac0f5d4ac91e81f435cc21f5905b9753ee1a79913306957589943da161a6f5dc3082b80930553769ce11d82d9cb12d8a12bb4e56eb3f1200eb", "television"},
{"$sshng$1$16$A0B8FCAB2B655BA3D04B2020B89A142E$1200$a4dbb4c526a6bea3aeca26e6d89f0c19ebdfb9154ce4cdb4bbfc3420ffce58dd6ae1077dba9753a001b22b07e4248bb2d4a3c4bf2cbae43f8c29f55c3c36c656aa7262fd2f429a8f7fbc443c175687b20c78ed3e409a03fb7b0afa22ef2fad63d580ce00e31948171787a8861e34d4c966d0c54d299585df757a76a278c28e899b7b16fe74d38ad9f6220a2ebbd2b427a3f29436feb2e000a3f1b26a3906eb84314a8f8dc211aeab0e5d5c776b54a59c3630a96de506fdfcf7a6991bae4e90ef2f6f99d5a92c78eddc1f7bd75a94bc472c32ef82b56261889a60fbaeee0b145c4aa785bff8b854b8c61fde3f018e10e9b4de6fbf5aa7ff30d985a8b8da1e55459855cd96076d0de5ff31a593ca7ff4badb07886808c624ceaf955569138c57fd9006877d8a174bce3347b72490d5181d83a20500dc49e8160d075659b568820ac2788a50aba0488a598c6d90821026c9a6213f279b8773eb3c5b60a73e48199ed7cba66595e7f219c4d0f5e231219619ffbd3d7bd1dad4ada8bf8d9ddbd5319ff47922e6858946778daf0e6b47973db77f56dcc356691ccc652ccd53d9f9895c896d99cf0c498e5a8d712f2e8a159a80e8a3e68b812650f0ddb0e1300438b914f4c28d232c443768bccaeb204212494782003343a5cf6d455b95efc94c8d95544db32c0539d0e1fc0288b5ecfcbc4bb7b6278a54093a56ec0ad5928c113aa96a114d7fd3aec173759f5c081f1d0a2f0922433ff17911901c0f0f940b1f345d161d91ecd4456e9b8458a14e0fcbaf2b750201c10cff3c8f387004b99be515f45c00200efea4e36d83524a760c20518d902e38d6121bef29b479edbf44be4c51730c3bbc86dd6abc40b67470e12b8235cb1317b6dae34d99248f3a8f98a77d848360c01a645f76c3abc3f66af0d1f0f7bbb77930b3f85430062fb1a82c5aff1350bdba049a8bc7bcc33e61fd3e8484b9e6d51ea121337b7553284cd1222a2469e1c7158f13ff63307530243af25b4b36d19ba0604212ebcb42b450c475e238c2b9f021088b16aacfb6e564eef86860fd077f90de471fc26621360609e526444e7556bb8d6de703271a4ba8dec254305cd1163f90a32d8966f599903de0e4b62e3a8db15753fb099d164d9bd44c05f163fd96ef73382c779214c8ec93498f2f5fa31a74ad6ac3136a37c6f6c27b1dd7b93c1e292f2ef0d569581f45cb0747ee5a2fcba5781cdc96b9b2f07bdbaf7ff4e0432873072112fd17792c91548393cd58a7eb8b126f17ee107f9670567c0ab6e6b9a2997054d968feb29f479fb8b7888138971a14228bad1854d9804f1bea77014b7f0d1037444178d66d2db19b660cf5e84726b2f730662a1df93abc54ae52
1d3d1691fb4fa48b087ead9dfccf4e6367d9a25f48a019a6affbec84c20ae7b10c2a169cfa07a4d26c5035c02d3b7d01681bf56bf568ab1f740c86ee6f43b8b440eea1f1139a89fa5bc653164426856e3a5e22ff5fed05ba7a054f6d4609eb142ef113a24f05b92ba72c40cd9bde09d8125d462fd30bab15cb47130fa30730b26c0d399d14b9cb42ec56df024bb9bbcd18ab4d279ccf82b2c1fee8fdbade8bd506791a6fd51349b24cdc36ec4d88e6dd43a85b92a71458908271d298681f54aa567262fc70260cc15d7f5559abd7e7ee4d2c7c727bf5036c469b690ece969480240c", "Olympics"},
{"$sshng$1$16$ABF86CF7849BBC5C661A69F1F7B4C87A$1200$4941cb1e3d19fd7173e7b97faf6484d803fe6737628241e7e848b4d02ef63c91a99aba940c116157f96e7c8e91550df51df80324a5630244ae83fcc04304ca9f7a4d9621cab45a6d513afc06b2364c222a7907729e3562f676fb94d7a3cfb2551de2766e9d67c035fecde455fd741a67521d0f71673d7c364ac314850314b31b6899198544c0f2ab788ebd435cd291ae8c12576837f784ab7cd8a9bc24bea3823034154df1b3d9b6a064131a9eb81b3fd298f128458cfce450305602e087ea9957e29117942ee7a2fd8a980da55c364f781feeb1bf355ee0de54ce946d85930f786d6f6175254d5a4370ddc5c88ef13e23429b0d9d01f413f08ce5253141d84c8368308890c69b65be93a415923f98bc898f9cb4abafcbcddf2785f698e091d75eea0a90216ca47d0459cb2b8d95a8989f604a0c7bc8dc690791c0c73e6f7a2628ea7ebd8e1a39ae583c91668dca7806f226ab361f07bfd35f7130aefc83071b865cc004f963ef80a750008e920f1854321949d6143ffc33b60b58015d5f32c820006b0a91aa08755fd859c845d3a75d89350d9c12e7df32b9bcd188681b0981ac4713505c4b516ee4d1073ea715b68d0c10ce3f562f0b5b5383a6bd53008ec0e8927d78d8fd21d760e67da700db638f3835cfd523046ee0f2fffed05c3bd902b66765629f428bc2808e300fbe2064af9ab125ac4195f3b5756e09059cc391127c8efba8e50eaeb7e0a4d98561ce9540fa6b9b6333beb1efed5751e7acc1aaf4f0ff975e548a21b08a2ab49d4e6bf2336e60eb8684debe5d611769cee17c38e02d2057284d7948fdbe459092a0e4471107f55562aceb1045f0f1cefb3d89e549422f27618137c48dce1f149f6c8748d4a1eff89eed9140ec598def8d38457a239ee167af6d60ae995261d9cb47ce2d4d25b1520f8b75408b45265cf14d3892dcb53732fa4151312f4f6c8d46a54d07c23b4b253003489a28d544fa903eb0a72a3ae914dafed5218ce8d745b23bde33c9e346db79051e763866fba38f123b32c110b4168c3baf2ace735d0fcf5ccf7c2a29d67d4831c0cf3472ab8b197ed953056c42d7cc91646ca12a7bebb23fa4fb063217b7b7c9fec7688788798424acc32b3c704a91bee6a63ca5a2186df80e225f96679568c936c9a47b5615858211c72441a9ff4dc265ba98f346984bf92969af9bd035f93a47ddf8beef9ba84eacc1f76ee4bd1eb242dc9fb2949d287f685369d1122865926270f8bc83d7118801e77e48fd2dd4b996231564d1649c4636b734e483067c1181d1edc6dd424f517cd3ea3fe1ab904cda78b7b7d6c856a82c7e1c6ba3e9fb93da1dfeaf4e3eff86b4541ab38f526f509b915f787d6abd4a4c
7174dfcb18f36ba72fa69b61a060b2785b3d3e8d28e9f6aa1a32aca3948ee48188a7ee24b160f3a6bc98297bd852d0759080cecd85dbc91bf4404705948c6a169e140a2479cdf5b840c3d6f99ea4e09b76730b4d33300f6a963c90cb0e07833a4bf314d72d81ae8ed5cf5ca4bcb6f35acb0c7d8298b70a5b61f87b13c3b1d02b56fe42c5465ad57dd4041b9b36943188acb8742052669b95fd98f3d18351f8748e9eb0f47d11a4d6ca2ec0348ef7d24e9f80c1dc239b513ed7867f25903875a1e9a983c5c8475b8de1f7f70423f1f472fca1e99a52b14105c4a47edb657eb87d0353", "extuitive"},
{"$sshng$1$16$925FA0A2EF7283A2F69C6CE69121D43C$1200$0498402851fd405114a860a1fdc760752bc8b7f44c77b2ef6a6d46ed3cee48d963bf34b905124c18823bc69819bbec29edebf4e697afffec2c35e79b993ff28b92d0355758b9c4ea00fb1f4bd48732059643ca2144b9c35de734d8db395076cb7c0468f6cfbabb1646345f907af82bf1598733d7aaa5496c55e662075d6bdb47cb941160fd1106570303d009bdc89fa3ecc07c84c3f91238a51db8ecc09f8e6b6c1395ce57970cbf2a3ef1341ddcb404e95832f0535a30b17048554b3341502619c48685db4706855ce62a86b3953f1219d4dae10243265d01264fa6408006188a40683e5de4952cb6796cd2593e9365065f51ff21b23b8bc075445226092b988114962ed5f4b97128cc69eca7a3d1169d2d83a632a5cc51290527bc848c7dd3d76554b28bb2bea0626f4fd27f3b9610e827e8211c60879d77ea1593d80908618b55081048bc2baef6848c410372b9a69358feb95c23d747f81b59577c601d55337b7c737d77bd742a115681a778c3d8e513a3ccd25cf833a32c73bf04476131b2bb498fac9496597163766b5f466b2478a564736c245cf0a0bf4b33be13eb2360dacbf8573b342f336d0341229654cd140674b18e35c04f917a9668306b4c93285825bdc8494c209d103212ea1deac7839db28acfb50fabc5c2b5057333ecbcb685adef5e962a526a02fd44f40a5af9c27d4211af129ad47b5fbc1d5f9f01e5ad1c53f728ead66a45cb8e6a9c1237aeb02374225ef2b63bc3ea6b2b1ab6136f90236ed5de5f88c6edde8ea75db8cf9aed8030537731dfe3ee855ab501f0235aeb05c8b2e3f4668ca0ab230cc8764863bf3ea71bbce2763556a14cdc5e09b0fa8e9ce6948d377b087fe04d1a5ae2ca61350514376cf447119fad0ea158b16b86be8f43742fb9934d3c1e8cc46497c191d1703a85e0b8b102b27595471687c5d1335a2290214fd46d9568d4b2845b88f116d5c2b3e3766030beb3d71157ff0c4fabd13aa173795db5b88d059ec79bf50c22f3119411b4279d1c7c0e88a7b01fa47e52553913b0ceee272500fedfa28483a849c186ce31b2134945dcaa84c13f7e474d59b0a0f5f768a8ec4cd58c8499b3ba3e1880fa7764ea9e424b29e5f6ea93671bce2985ea0d51efc2774f023c65e99be3db57c4a83e3c2f59fee62f60fa8c7eb66ff870f05cffd7ea208520a0640fe86f619944b389cfe695567ebc82829407273ac67130d3b09c8ff172a77a8ef56b18aac65d5607ef9f6ee791c0ec5b6447bd847b5d6a5411142a110700d5bb04424111ddfee27139ebad931da60de1e8bfc87f2b53b8720435d3dbb358445fc3493ada42192783741f72b5934d6a399e1ea16291fad9f38e49f23e3ad730
3d4d1e5677b9a81aff8dfca7abb33455e4e7858a9de656e4239c22ac2e91b558bcc25b356be583487ffc24459873febd2becae6056544d56fe670342347048a8abca019d2203794fd8652d31899f094d67aa304d1e607460efbdf05b3b407de54fc9e33d1879fe577091036b77e43e382f1acbbc12cb3bc83f25a4791265741e018b4825beb0a6901db19ee58a3c378df4ffeb4c9def7e730a08546d3f698f5ca4f98c81deb2982729ab95167ecaa1d6320b12d48f4de2fc9891b8e117c88a6f5bff046b1ea8cab4b0af8a488dfa6353ccaa3125e959322bd0ad4662ad15cffb86f3", "C0Ld.FUS10N"},
/* DSA test vectors */
{"$sshng$0$8$78DAEB836ED0A646$448$95d5a4abd38c957a969a322aa6936798d3c8523e6e553d762e4068b130294db89b4e67b790825bd6e0de1b60528557d8faf0ce4d413d92818f0cbb315b5b7902df845722032bc6883b4b87b5e5cce406c15f6d0b2d45916d156a661b0cc6a421dc7dd794788df9085a59c6f87c5baed7c6bc4a48a64c5a439d9b9f7e808397fce1fc1ed789e0114cb03cd392bf660541041c1f964476044d39dd71eb240231f4111494b3fbe85a35f2bbe32d93927aedecf959e786a51be450ade61e746b8eae6174016e8dabf59a358a518c3445c93b4824e61c065664f24b3e773643c0e47996b7c348cefe63407303cbb37e672905bb0a4fd51e4cfd920563863987f96f9fa2098d0ed5c9244f21ba4df28d9826fd8e0f525af349f7b54f0c83bee8de8e1d3702a6edc0a396af85b8805d3ac4a0b01f053d0454856fa3a450f199637ae0333670483a454769b5bcbb5a6329d07c0ad6ac847f11e32ccb835650fb9404880c1ad19548cfb57107d43cc8610b9869165a8b116867b118f97ef74f09ab285114512f599d066d46dae846a1b04787f3e30410b234e5fc098e8a39419a2dbdb5a25c709b13fd31eb2d0e6994e11df0e32ff45b1a3c95c153ce606912a8dc966daf", "television"},
#ifdef DEBUG
/* this key is SUPER slow, now that OMP_SCALE has been increased. */
/* it would be nice to get one of these with rounds set to 2, */
/* instead of the rounds=64 of this hash (pass_gen.pl update) */
/* new ssh key format */
{"$sshng$2$16$cc2c3c68c39e0ba6289ed36cb92c3a73$1334$6f70656e7373682d6b65792d7631000000000a6165733235362d636263000000066263727970740000001800000010cc2c3c68c39e0ba6289ed36cb92c3a73000000400000000100000117000000077373682d727361000000030100010000010100af9bf6a900464f154916fac3d80476e0ee739ff7f25a96b562ff9f4262db1972992947dfa89da47f9fa5f4d9e54a2d103ce63779746888c298693663310f054af1c1dc90f62b22f630703726631c03ff217c29a32fd9f9bc178aabe9666c37c2c2bf4a2b4c528efe51e755053216d41e860ef996b549184cd15bd17641128690d2946a76261954edfee942bbefbb182df320d3da7f46a5fcddc15b5ecbf9b1b822cbc9ef978e8b639e8eab2e3b1229d429da4f6bdc27af2f2aab0e187a6cce91b95a8ac6f5602773d0014f1e8124a89e43e502bebb4d21f6a148e208e2d591391d1aede6a0a6d499a3de9996474310dd9d3233e3f05e9d0e85aba44715e838bd000003d08168da8d056f904faf9d80b22c08141e8b068a3af64ace3b5ffbad24b884cd37ae7ad89546031ab834d612b44266b95263a5c38f0d628d704caf70944629ad66d3cef974ec4faaaeb7d7df67f1321bb606ec6e14060c0de1a63a5732ca89b94ae765cb0671a4a1a76b42c06c220546bbf0f8a88471c0bf4200a0cbe0d346be67f688dcf76a3666f7c4447b3ced2d0c9a2fa50abc6ca222ddd70aeb82d65f8fefa313b3db76c5a03478bebc9e0942e17c07ae11d1fbe1b0b380ca2506a26aaf5cdb8668af186d1bc293844bd9c2cc8bb40530387f9a5e11770484593af69384fc003beb82beffa00c1b23f7d6a9bd8f6153cb7abd9531008df384a3455d7cdd7020df4dc507f34e697ad437f01989271b17b93045265f20e6fd02f63ac1e13ec85f8224bc60dd91e15dcfa2ec4f6986e3b37ea6bd571ca18089402f80c121323eb774708cc6ab470e05a53428b65dede47ded97c4f5941be44f6290d5ccdd9bea95b06190efee6c64d874798b6045c5d553a1f68c95f143d0a6893877796fff452851d64ac73c007b91dd6058a5c31165003d9d66b4a1a40c2f82e5c3be6820b109addc0f088c84576e30c7202da3304636de4035f3ca8b032885aa2bedb4d1e134c1615139fb6ed7fa924c2e8abdfcd75da029e910ee8a9d4af594e2a9732115237b6ba3c24f8dfd4bed0a7cb4d96e114bff30e9c68226ae04de6fee2340b41c49cd08982a3f21169853366882a4af43e256cb0d09c88856c46f2ad8a7bcc3896efe5f4f104ef9b595cd08b4b76d6ac074f4fa4a488f508c6106603cb4ca65af819d2222a086ddd16a63021627f337ab9d86b33150808313bfe7368737bf38e7dee410cf08f2effe
f780d161e2cb734135bba36fe2ee3319cda95242b89b50673c88eb3dfa331e987e3fbde92cec7e019990d97b11c71d5b04b8ec451549abc9ed195a080aefb1d77eff476f9de4315fca5bf6386438869a8d59a5f0badda70b337bb9bdcff966229d631286d3c5b97c41f3ef5daa6ef4416577815214733e8602ef7f8abc3a19ee58f48b10c8ab1d5c76f01febdb29b36910d615d4022849ec117f02b6ae898cc0ff67e61df43284d3ff739ab4c34fe2854797ae0b66e0ba234e236daba6eb9172e9e1f4a0f5283ae9b336059d2ab2c7145e0a4de4b5bed3baf87c90ad4d47b94eb1c01b07510191f06b9eaf014e225b2bce46d5a7080c6d1daf64460836d7630c157e44afc9483a777d76fcafbfc2c4f299211c0465f0151f13707f815700944ad6a17e23e63dd0eecb5cdb5284ad92dd853e0ce136bc77633fef514e6aadeb61e7fe885fe399076cbd5464a6d17efa1e116853e80cf08adea7e550b0d27e6a96d835069674fd7bcc$64", "12345"},
#endif
// EC private key
{"$sshng$3$16$00B535FBA963402F20C12648A59D7258$128$dfa09369ff38f33c9789d33760d16fdd47730311b41b51a0c7b1dd1dec850c5c2ff523710af12839f25a709f0076cdd3e3643fab2ea1d17c6fae52a797b55e752b71a1fdd46d5bd889b51ddc2a01922340e5be914a67dabf666aff1c88275bd8ec3529e26386279adeb480446ab869dc27c160bd8fe469d5f993b90aaffef8ce", "password123"},
{NULL}
};
/* Per-candidate plaintext buffer, sized by the format's maximum password length. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Per-candidate result flags filled in by crypt_all(): non-zero = key decrypted. */
static int *cracked;
/* Decoded salt + ciphertext of the hash currently under attack. */
static struct custom_salt {
	unsigned char salt[16]; /* IV / KDF salt; first 8 bytes feed the MD5 KDF */
	unsigned char ct[N];    /* raw encrypted private-key blob */
	int cipher;             /* 0=3DES, 1=AES-128 (PEM), 2=openssh-key-v1, 3=EC/AES-128 */
	int ctl;                /* ciphertext length in bytes */
	int sl;                 /* salt length in bytes */
	int rounds;             /* bcrypt-pbkdf rounds (cipher 2 only) */
} *cur_salt;
/*
 * One-time format setup: scale the keys-per-crypt limits by the OpenMP
 * thread count, then allocate the per-candidate password and result buffers.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* min scales by the raw thread count; max additionally by OMP_SCALE.
	   omp_t is multiplied in place between the two, so order matters. */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	cracked = mem_calloc(self->params.max_keys_per_crypt,
	                     sizeof(*cracked));
}
/* Tear-down counterpart of init(): release the per-candidate buffers. */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/*
 * Canonicalize a ciphertext for duplicate detection: copy it into a static
 * buffer and lower-case it (the format sets FMT_SPLIT_UNIFIES_CASE, and the
 * hex fields are case-insensitive).  Returns the static buffer.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char buf[sizeof(struct custom_salt)+100];
	strnzcpy(buf, ciphertext, sizeof(buf));
	strlwr(buf);
	return buf;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
int len, cipher, extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
if ((p = strtokm(ctcopy, "$")) == NULL) /* cipher */
goto err;
if (!isdec(p))
goto err;
cipher = atoi(p);
if ((p = strtokm(NULL, "$")) == NULL) /* salt len */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > 16)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt */
goto err;
if (hexlen(p, &extra) != len * 2 || extra)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* ciphertext length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if ((p = strtokm(NULL, "$")) == NULL) /* ciphertext */
goto err;
if (hexlen(p, &extra) / 2 != len || extra)
goto err;
if (cipher == 2) {
if ((p = strtokm(NULL, "$")) == NULL) /* rounds */
goto err;
if (!isdec(p))
goto err;
}
if (cipher != 0 && cipher != 1 && cipher != 2 && cipher != 3) {
fprintf(stderr, "[ssh-ng] cipher value of %d is not supported!\n", cipher);
goto err;
}
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
int i;
static struct custom_salt cs;
memset(&cs, 0, sizeof(struct custom_salt));
ctcopy += FORMAT_TAG_LEN; /* skip over "$sshng$" */
p = strtokm(ctcopy, "$");
cs.cipher = atoi(p);
p = strtokm(NULL, "$");
cs.sl = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < cs.sl; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$");
cs.ctl = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < cs.ctl; i++)
cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
if (cs.cipher == 2) {
p = strtokm(NULL, "$");
cs.rounds = atoi(p);
}
MEM_FREE(keeptr);
return (void *)&cs;
}
/* Select the parsed key blob that subsequent crypt_all() calls will attack. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
#if 0	/* dead code: the specialized 16/24-byte variants below are used instead */
/*
 * Generic MD5-chained KDF: derive 'nbytes' key bytes from the password and
 * the first 8 salt bytes, re-feeding the previous digest each round.
 * Kept for reference only.
 */
static void generate_key_bytes(int nbytes, unsigned char *password, unsigned char *key)
{
	unsigned char digest[16] = {0};
	int keyidx = 0;
	int digest_inited = 0;
	int size = 0;
	int i = 0;
	while (nbytes > 0) {
		MD5_CTX ctx;
		MD5_Init(&ctx);
		if (digest_inited) {
			/* chain: later rounds hash the previous digest first */
			MD5_Update(&ctx, digest, 16);
		}
		MD5_Update(&ctx, password, strlen((const char*)password));
		/* use first 8 bytes of salt */
		MD5_Update(&ctx, cur_salt->salt, 8);
		MD5_Final(digest, &ctx);
		digest_inited = 1;
		if (nbytes > 16)
			size = 16;
		else
			size = nbytes;
		/* copy part of digest to keydata */
		for(i = 0; i < size; i++)
			key[keyidx++] = digest[i];
		nbytes -= size;
	}
}
#endif
/*
 * Derive 16 AES key bytes as MD5(password || salt[0..7]) — the legacy
 * OpenSSL-PEM style one-round KDF.
 */
static inline void generate16key_bytes(unsigned char *password,
unsigned char *key)
{
	MD5_CTX ctx;
	MD5_Init(&ctx);
	MD5_Update(&ctx, password, strlen((const char*)password));
	/* use first 8 bytes of salt */
	MD5_Update(&ctx, cur_salt->salt, 8);
	/* digest is keydata */
	MD5_Final(key, &ctx);
}
/*
 * Derive 24 key bytes for 3DES:
 *   D1 = MD5(password || salt[0..7])        -> key[0..15]
 *   D2 = MD5(D1 || password || salt[0..7])  -> key[16..23] (first 8 bytes)
 */
static inline void generate24key_bytes(unsigned char *password,
unsigned char *key)
{
	unsigned char digest[16];
	int len = strlen((const char*)password);
	MD5_CTX ctx;
	MD5_Init(&ctx);
	MD5_Update(&ctx, password, len);
	/* use first 8 bytes of salt */
	MD5_Update(&ctx, cur_salt->salt, 8);
	/* digest is keydata */
	MD5_Final(key, &ctx);
	/* second round chains the first digest in front of the same input */
	MD5_Init(&ctx);
	MD5_Update(&ctx, key, 16);
	MD5_Update(&ctx, password, len);
	/* use first 8 bytes of salt */
	MD5_Update(&ctx, cur_salt->salt, 8);
	MD5_Final(digest, &ctx);
	/* 8 more bytes of keydata */
	memcpy(&key[16], digest, 8);
}
/*
 * Cheap padding plausibility test used for the new openssh-key-v1 format,
 * with no ASN.1 parsing involved.  The final byte gives the pad length,
 * which must be 4..16, and the bytes before it must count up toward it
 * (a 01 02 03 ... style run).  Returns 0 on plausible padding, -1 otherwise;
 * buffers shorter than 16 bytes are always rejected.
 */
static inline int check_padding_only(unsigned char *out, int length)
{
	int pad_byte;
	int idx;

	pad_byte = out[length - 1];
	if (pad_byte > 16 || length < 16)
		return -1;
	/* Tiny pad values are rejected to cut down on false positives. */
	if (pad_byte < 4)
		return -1;
	/* Expect an incrementing sequence ending in the pad byte. */
	for (idx = length - 1; idx > pad_byte; idx--) {
		if (out[idx] != out[idx - 1] + 1)
			return -1;
	}
	return 0;	/* valid padding! */
}
/*
 * Plausibility test that a decrypted buffer looks like a BER-encoded EC
 * private key: PKCS#7 padding must be valid and the leading structure must
 * read SEQUENCE { INTEGER (version, length 1), OCTET STRING (private key,
 * at least 8 bytes), ... }.  Returns 0 when the buffer passes, -1 otherwise.
 * strict_mode is currently unused here.
 *
 * To inspect the expected layout:
 *   $ ssh-keygen -t ecdsa -f unencrypted_ecdsa_sample.key  # no password
 *   $ openssl asn1parse -in unencrypted_ecdsa_sample.key
 */
static inline int check_padding_and_structure_EC(unsigned char *out, int length, int strict_mode)
{
	struct asn1_hdr hdr;
	const uint8_t *cursor, *limit;

	/* Reject candidates whose PKCS#7 padding is already wrong. */
	if (check_pkcs_pad(out, length, 16) < 0)
		return -1;

	/* Outer SEQUENCE wrapping the whole key. */
	if (asn1_get_next(out, length, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_SEQUENCE)
		return -1;
	cursor = hdr.payload;
	limit = cursor + hdr.length;

	/* version Version (Version ::= INTEGER), must be a single byte. */
	if (asn1_get_next(cursor, limit - cursor, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_INTEGER)
		return -1;
	cursor = hdr.payload + hdr.length;
	if (hdr.length != 1)
		return -1;

	/* OCTET STRING holding the private scalar. */
	if (asn1_get_next(cursor, limit - cursor, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_OCTETSTRING)
		return -1;
	cursor = hdr.payload + hdr.length;
	/* "secp112r1" (112-bit prime field) is the smallest curve; all others are bigger. */
	if (hdr.length < 8)
		return -1;

	/* XXX add more structure checks! */
	return 0;
}
/*
 * Plausibility test that a decrypted buffer looks like a PEM RSA/DSA private
 * key: PKCS#7 padding must be valid and the leading BER structure must read
 * SEQUENCE { INTEGER (version), INTEGER (large value, >= 64 bytes), ... }.
 * In strict_mode two further INTEGER fields are checked as well (a small one
 * followed by another large one, >= 32 bytes).  Returns 0 on pass, -1 on fail.
 *
 *   RSAPrivateKey = { version = 0, n, e, d, p, q, d mod p-1, d mod q-1, q**-1 mod p }
 *   DSAPrivateKey = { version = 0, p, q, g, y, x }
 *
 *   openssl asn1parse -in test_rsa.key   # shows the structure nicely
 */
static inline int check_padding_and_structure(unsigned char *out, int length, int strict_mode)
{
	struct asn1_hdr hdr;
	const uint8_t *cursor, *limit;

	/* Reject candidates whose PKCS#7 padding is already wrong. */
	if (check_pkcs_pad(out, length, 16) < 0)
		return -1;

	/* Outer SEQUENCE wrapping the whole key. */
	if (asn1_get_next(out, length, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_SEQUENCE)
		return -1;
	cursor = hdr.payload;
	limit = cursor + hdr.length;

	/* version Version (Version ::= INTEGER) */
	if (asn1_get_next(cursor, limit - cursor, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_INTEGER)
		return -1;
	cursor = hdr.payload + hdr.length;

	/* First big INTEGER.  RSA (as used in ssh) uses big prime numbers,
	   so requiring >= 64 bytes should be safe. */
	if (asn1_get_next(cursor, limit - cursor, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_INTEGER)
		return -1;
	cursor = hdr.payload + hdr.length;
	if (hdr.length < 64)
		return -1;

	if (strict_mode) {
		/* INTEGER (small one) */
		if (asn1_get_next(cursor, limit - cursor, &hdr) < 0 ||
		    hdr.class != ASN1_CLASS_UNIVERSAL ||
		    hdr.tag != ASN1_TAG_INTEGER)
			return -1;
		cursor = hdr.payload + hdr.length;
		/* INTEGER (big one again) */
		if (asn1_get_next(cursor, limit - cursor, &hdr) < 0 ||
		    hdr.class != ASN1_CLASS_UNIVERSAL ||
		    hdr.tag != ASN1_TAG_INTEGER)
			return -1;
		cursor = hdr.payload + hdr.length;
		if (hdr.length < 32)
			return -1;
	}
	return 0;
}
/* bcrypt-based KDF used by the new openssh-key-v1 format (cipher 2);
   implemented elsewhere in the tree. */
int bcrypt_pbkdf(const char *pass, size_t passlen, const uint8_t *salt, size_t saltlen,
	uint8_t *key, size_t keylen, unsigned int rounds);
/*
 * Decrypt (part of) the stored private-key blob with a key derived from
 * 'password', writing plaintext into 'out'.  When full_decrypt is zero only
 * the leading SAFETY_FACTOR bytes plus the trailing blocks needed for the
 * padding check are decrypted; EC keys (cipher 3) are always fully decrypted
 * and the new openssh-key-v1 format (cipher 2) only ever decrypts its last
 * two blocks.
 */
static void common_crypt_code(char *password, unsigned char *out, int full_decrypt)
{
	if (cur_salt->cipher == 0) {	/* 3DES-CBC (legacy PEM, DSA test vectors) */
		unsigned char key[24] = {0};
		DES_cblock key1, key2, key3;
		DES_cblock ivec;
		DES_key_schedule ks1, ks2, ks3;
		/* MD5-based KDF supplies the 24 key bytes. */
		generate24key_bytes((unsigned char*)password, key);
		memset(out, 0, SAFETY_FACTOR);
		memcpy(key1, key, 8);
		memcpy(key2, key + 8, 8);
		memcpy(key3, key + 16, 8);
		DES_set_key((DES_cblock *) key1, &ks1);
		DES_set_key((DES_cblock *) key2, &ks2);
		DES_set_key((DES_cblock *) key3, &ks3);
		memcpy(ivec, cur_salt->salt, 8);	/* IV = first 8 salt bytes */
		if (full_decrypt) {
			DES_ede3_cbc_encrypt(cur_salt->ct, out, cur_salt->ctl, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
		} else {
			/* Head for the structure check plus last 32 bytes for padding.
			   NOTE(review): ivec is not re-seeded before the tail call, so
			   the first tail block decrypts with a stale IV — presumably
			   only the final block matters here; confirm against the
			   padding check. */
			DES_ede3_cbc_encrypt(cur_salt->ct, out, SAFETY_FACTOR, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
			DES_ede3_cbc_encrypt(cur_salt->ct + cur_salt->ctl - 32, out + cur_salt->ctl - 32, 32, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
		}
	} else if (cur_salt->cipher == 1) {	/* AES-128-CBC (legacy PEM) */
		unsigned char key[16] = {0};
		AES_KEY akey;
		unsigned char iv[16];
		memcpy(iv, cur_salt->salt, 16);	/* IV = the 16 salt bytes */
		memset(out, 0, SAFETY_FACTOR);
		memset(out + cur_salt->ctl - 32, 0, 32);
		generate16key_bytes((unsigned char*)password, key);
		AES_set_decrypt_key(key, 128, &akey);
		if (full_decrypt) {
			AES_cbc_encrypt(cur_salt->ct, out, cur_salt->ctl, &akey, iv, AES_DECRYPT);
		} else {
			AES_cbc_encrypt(cur_salt->ct, out, SAFETY_FACTOR, &akey, iv, AES_DECRYPT); // are starting SAFETY_FACTOR bytes enough?
			// decrypting 1 blocks (16 bytes) is enough for correct padding check
		}
		/* Final block decrypted with its true IV (the preceding ciphertext block). */
		memcpy(iv, cur_salt->ct + cur_salt->ctl - 32, 16);
		AES_cbc_encrypt(cur_salt->ct + cur_salt->ctl - 16, out + cur_salt->ctl - 16, 16, &akey, iv, AES_DECRYPT);
	} else if (cur_salt->cipher == 2) { /* new ssh key format handling */
		unsigned char key[32+16] = {0};
		AES_KEY akey;
		unsigned char iv[16];
		// derive (key length + iv length) bytes
		bcrypt_pbkdf(password, strlen((const char*)password), cur_salt->salt, 16, key, 32 + 16, cur_salt->rounds);
		AES_set_decrypt_key(key, 256, &akey);	/* AES-256-CBC */
		memcpy(iv, key + 32, 16);	/* IV = last 16 derived bytes */
		AES_cbc_encrypt(cur_salt->ct + cur_salt->ctl - 32, out, 32, &akey, iv, AES_DECRYPT); // decrypt 2 blocks
	} else if (cur_salt->cipher == 3) { // EC keys with AES-128
		unsigned char key[16] = {0};
		AES_KEY akey;
		unsigned char iv[16];
		memcpy(iv, cur_salt->salt, 16);
		memset(out, 0, N);
		generate16key_bytes((unsigned char*)password, key);
		AES_set_decrypt_key(key, 128, &akey);
		AES_cbc_encrypt(cur_salt->ct, out, cur_salt->ctl, &akey, iv, AES_DECRYPT); // full decrypt
	}
}
/*
 * Try every queued candidate password against the current salt: each one is
 * partially decrypted (fully for EC keys) and run through the cheap
 * padding/ASN.1 structure check for its cipher, setting cracked[index].
 * Returns the number of candidates processed.
 *
 * Fix: the for-statement used to sit inside the #ifdef _OPENMP block, so
 * non-OpenMP builds only ever tested candidate index 0.  The loop now runs
 * unconditionally; only the pragma is conditional.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		unsigned char out[N];

		/* Don't do full decryption here (except for EC keys). */
		common_crypt_code(saved_key[index], out, 0);
		if (cur_salt->cipher == 0 || cur_salt->cipher == 1) {
			cracked[index] =
				check_padding_and_structure(out, cur_salt->ctl, 0) == 0;
		} else if (cur_salt->cipher == 2) { /* new openssh-key-v1 format */
			/* always check the last block (16 bytes) */
			cracked[index] = check_padding_only(out + 16, 16) == 0;
		} else if (cur_salt->cipher == 3) { /* EC keys */
			cracked[index] =
				check_padding_and_structure_EC(out, cur_salt->ctl, 0) == 0;
		}
	}
	return count;
}
/* Return 1 as soon as any of the 'count' candidates was flagged as cracked. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* Per-index check: was this candidate flagged by crypt_all()? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/*
 * Final confirmation for a candidate that passed the quick check: redo the
 * decryption in full and apply the strict variant of the structure test.
 * EC candidates (cipher 3) are accepted as-is at this stage.
 */
static int cmp_exact(char *source, int index)
{
	unsigned char out[N];

	common_crypt_code(saved_key[index], out, 1); /* full decryption */

	switch (cur_salt->cipher) {
	case 0:
	case 1:
		return check_padding_and_structure(out, cur_salt->ctl, 1) == 0;
	case 2: /* new openssh-key-v1 format: always check the last 16-byte block */
		return check_padding_only(out + 16, 16) == 0;
	case 3: /* EC keys */
		return 1;
	}
	return 0;
}
static void sshng_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored candidate password for slot 'index'. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* John the Ripper format descriptor wiring the ssh-ng callbacks together. */
struct fmt_main fmt_sshng = {
	{
		/* format parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ FORMAT_TAG },
		sshng_tests
	}, {
		/* format methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		sshng_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_BIO_NEW */
|
feature.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF EEEEE AAA TTTTT U U RRRR EEEEE %
% F E A A T U U R R E %
% FFF EEE AAAAA T U U RRRR EEE %
% F E A A T U U R R E %
% F EEEEE A A T UUU R R EEEEE %
% %
% %
% MagickCore Image Feature Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/feature.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a n n y E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of
% edges in images.
%
% The format of the CannyEdgeImage method is:
%
% Image *CannyEdgeImage(const Image *image,const double radius,
% const double sigma,const double lower_percent,
% const double upper_percent,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the gaussian smoothing filter.
%
% o sigma: the sigma of the gaussian smoothing filter.
%
% o lower_percent: percentage of edge pixels in the lower threshold.
%
% o upper_percent: percentage of edge pixels in the upper threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-pixel state stored in the canny_cache matrix during Canny edge
  detection.
*/
typedef struct _CannyInfo
{
  double
    magnitude,    /* gradient magnitude, hypot(dx,dy) */
    intensity;    /* magnitude surviving non-maxima suppression (0 if suppressed) */

  int
    orientation;  /* quantized gradient direction: 0,1,2,3 = 0/45/90/135 degrees */

  ssize_t
    x,
    y;            /* pixel coordinates; reused as a work stack by TraceEdges() */
} CannyInfo;
/*
  Return MagickTrue when (x,y) lies inside the image bounds, MagickFalse
  otherwise.
*/
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  if ((x >= 0) && (x < (ssize_t) image->columns) &&
      (y >= 0) && (y < (ssize_t) image->rows))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  TraceEdges() performs the hysteresis step of Canny edge detection: starting
  from the strong-edge seed at (x,y) it marks (sets to white) every connected
  pixel whose cached gradient intensity reaches lower_threshold.  Row 0 of
  canny_cache is reused as an explicit stack of pending coordinates (only the
  x/y members of the stored CannyInfo entries matter here), avoiding
  recursion.  Returns MagickFalse on any cache or pixel access failure.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,
    pixel;

  MagickBooleanType
    status;

  PixelPacket
    *q;

  ssize_t
    i;

  /*
    Mark the seed pixel as an edge (white).
  */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  q->red=QuantumRange;
  q->green=QuantumRange;
  q->blue=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Push the seed coordinates onto the work stack (slot 0 of cache row 0).
  */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  for (i=1; i != 0; )
  {
    ssize_t
      v;

    /*
      Pop the next pixel and scan its 8-connected neighborhood.
    */
    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;

      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (PixelPacket *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            /*
              Unvisited neighbor above threshold: whiten it and push it.
              NOTE(review): edge is advanced in place, so the remaining
              (u,v) offsets of this scan apply relative to the newly pushed
              pixel rather than the popped one — confirm intentional.
            */
            q->red=QuantumRange;
            q->green=QuantumRange;
            q->blue=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}
/*
  Pipeline: gaussian blur -> grayscale -> 2x2 gradient (magnitude +
  quantized orientation) -> non-maxima suppression -> hysteresis threshold
  via TraceEdges().  Returns a new edge image or NULL on failure.
*/
MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag "CannyEdge/Image"

  CacheView
    *edge_view;

  CannyInfo
    element;

  char
    geometry[MaxTextExtent];

  double
    lower_threshold,
    max,
    min,
    upper_threshold;

  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *canny_cache;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Filter out noise.
  */
  (void) FormatLocaleString(geometry,MaxTextExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  edge_image=MorphologyImageChannel(image,DefaultChannels,ConvolveMorphology,1,
    kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (TransformImageColorspace(edge_image,GRAYColorspace) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,DeactivateAlphaChannel);
  /*
    Find the intensity gradient of the image.
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* 2x2 gradient kernels need one extra column and a second row. */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      const PixelPacket
        *magick_restrict kernel_pixels;

      ssize_t
        v;

      static double
        Gx[2][2] =
        {
          { -1.0, +1.0 },
          { -1.0, +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };

      (void) memset(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      pixel.orientation=0;
      /* Quantize the gradient direction into 4 bins using the slope;
         +/-0.414 and +/-2.414 are tan(22.5 deg) and tan(67.5 deg). */
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p++;
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge.
  */
  progress=0;
  /* NOTE(review): intensity is first assigned in this pass, so
     element.intensity here is the memset() zero from the gradient pass —
     confirm that 0 is the intended min/max seed. */
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      /* Compare each pixel against its two neighbors along the gradient. */
      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      /* Reset the output pixel; TraceEdges() later whitens real edges. */
      q->red=0;
      q->green=0;
      q->blue=0;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold.
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      const PixelPacket
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const PixelPacket *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l F e a t u r e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelFeatures() returns features for each channel in the image in
% each of four directions (horizontal, vertical, left and right diagonals)
% for the specified distance. The features include the angular second
% moment, contrast, correlation, sum of squares: variance, inverse difference
% moment, sum average, sum variance, sum entropy, entropy, difference variance,
% difference entropy, information measures of correlation 1, information
% measures of correlation 2, and maximum correlation coefficient. You can
% access the red channel contrast, for example, like this:
%
% channel_features=GetImageChannelFeatures(image,1,exception);
% contrast=channel_features[RedChannel].contrast[0];
%
% Use MagickRelinquishMemory() to free the features buffer.
%
% The format of the GetImageChannelFeatures method is:
%
% ChannelFeatures *GetImageChannelFeatures(const Image *image,
% const size_t distance,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o distance: the distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelFeatures *GetImageChannelFeatures(const Image *image,
const size_t distance,ExceptionInfo *exception)
{
typedef struct _ChannelStatistics
{
DoublePixelPacket
direction[4]; /* horizontal, vertical, left and right diagonals */
} ChannelStatistics;
CacheView
*image_view;
ChannelFeatures
*channel_features;
ChannelStatistics
**cooccurrence,
correlation,
*density_x,
*density_xy,
*density_y,
entropy_x,
entropy_xy,
entropy_xy1,
entropy_xy2,
entropy_y,
mean,
**Q,
*sum,
sum_squares,
variance;
LongPixelPacket
gray,
*grays;
MagickBooleanType
status;
ssize_t
i;
size_t
length;
ssize_t
y;
unsigned int
number_grays;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
return((ChannelFeatures *) NULL);
length=CompositeChannels+1UL;
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
sizeof(*channel_features));
if (channel_features == (ChannelFeatures *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(channel_features,0,length*
sizeof(*channel_features));
/*
Form grays.
*/
grays=(LongPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays));
if (grays == (LongPixelPacket *) NULL)
{
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
for (i=0; i <= (ssize_t) MaxMap; i++)
{
grays[i].red=(~0U);
grays[i].green=(~0U);
grays[i].blue=(~0U);
grays[i].opacity=(~0U);
grays[i].index=(~0U);
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
grays[ScaleQuantumToMap(GetPixelRed(p))].red=
ScaleQuantumToMap(GetPixelRed(p));
grays[ScaleQuantumToMap(GetPixelGreen(p))].green=
ScaleQuantumToMap(GetPixelGreen(p));
grays[ScaleQuantumToMap(GetPixelBlue(p))].blue=
ScaleQuantumToMap(GetPixelBlue(p));
if (image->colorspace == CMYKColorspace)
grays[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index=
ScaleQuantumToMap(GetPixelIndex(indexes+x));
if (image->matte != MagickFalse)
grays[ScaleQuantumToMap(GetPixelOpacity(p))].opacity=
ScaleQuantumToMap(GetPixelOpacity(p));
p++;
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
return(channel_features);
}
(void) memset(&gray,0,sizeof(gray));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (grays[i].red != ~0U)
grays[(ssize_t) gray.red++].red=grays[i].red;
if (grays[i].green != ~0U)
grays[(ssize_t) gray.green++].green=grays[i].green;
if (grays[i].blue != ~0U)
grays[(ssize_t) gray.blue++].blue=grays[i].blue;
if (image->colorspace == CMYKColorspace)
if (grays[i].index != ~0U)
grays[(ssize_t) gray.index++].index=grays[i].index;
if (image->matte != MagickFalse)
if (grays[i].opacity != ~0U)
grays[(ssize_t) gray.opacity++].opacity=grays[i].opacity;
}
/*
Allocate spatial dependence matrix.
*/
number_grays=gray.red;
if (gray.green > number_grays)
number_grays=gray.green;
if (gray.blue > number_grays)
number_grays=gray.blue;
if (image->colorspace == CMYKColorspace)
if (gray.index > number_grays)
number_grays=gray.index;
if (image->matte != MagickFalse)
if (gray.opacity > number_grays)
number_grays=gray.opacity;
cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays,
sizeof(*cooccurrence));
density_x=(ChannelStatistics *) AcquireQuantumMemory(number_grays+1,
2*sizeof(*density_x));
density_xy=(ChannelStatistics *) AcquireQuantumMemory(number_grays+1,
2*sizeof(*density_xy));
density_y=(ChannelStatistics *) AcquireQuantumMemory(number_grays+1,
2*sizeof(*density_y));
Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q));
sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum));
if ((cooccurrence == (ChannelStatistics **) NULL) ||
(density_x == (ChannelStatistics *) NULL) ||
(density_xy == (ChannelStatistics *) NULL) ||
(density_y == (ChannelStatistics *) NULL) ||
(Q == (ChannelStatistics **) NULL) ||
(sum == (ChannelStatistics *) NULL))
{
if (Q != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
}
if (sum != (ChannelStatistics *) NULL)
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
if (density_y != (ChannelStatistics *) NULL)
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
if (density_xy != (ChannelStatistics *) NULL)
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
if (density_x != (ChannelStatistics *) NULL)
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
if (cooccurrence != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(
cooccurrence);
}
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
(void) memset(&correlation,0,sizeof(correlation));
(void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x));
(void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy));
(void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y));
(void) memset(&mean,0,sizeof(mean));
(void) memset(sum,0,number_grays*sizeof(*sum));
(void) memset(&sum_squares,0,sizeof(sum_squares));
(void) memset(density_xy,0,2*number_grays*sizeof(*density_xy));
(void) memset(&entropy_x,0,sizeof(entropy_x));
(void) memset(&entropy_xy,0,sizeof(entropy_xy));
(void) memset(&entropy_xy1,0,sizeof(entropy_xy1));
(void) memset(&entropy_xy2,0,sizeof(entropy_xy2));
(void) memset(&entropy_y,0,sizeof(entropy_y));
(void) memset(&variance,0,sizeof(variance));
for (i=0; i < (ssize_t) number_grays; i++)
{
cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,
sizeof(**cooccurrence));
Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q));
if ((cooccurrence[i] == (ChannelStatistics *) NULL) ||
(Q[i] == (ChannelStatistics *) NULL))
break;
(void) memset(cooccurrence[i],0,number_grays*
sizeof(**cooccurrence));
(void) memset(Q[i],0,number_grays*sizeof(**Q));
}
if (i < (ssize_t) number_grays)
{
for (i--; i >= 0; i--)
{
if (Q[i] != (ChannelStatistics *) NULL)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
if (cooccurrence[i] != (ChannelStatistics *) NULL)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
}
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Initialize spatial dependence matrix.
*/
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
ssize_t
x;
ssize_t
i,
offset,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,y,image->columns+
2*distance,distance+2,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
p+=distance;
indexes+=distance;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < 4; i++)
{
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
offset=(ssize_t) distance;
break;
}
case 1:
{
/*
Vertical adjacency.
*/
offset=(ssize_t) (image->columns+2*distance);
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)+distance);
break;
}
}
u=0;
v=0;
while (grays[u].red != ScaleQuantumToMap(GetPixelRed(p)))
u++;
while (grays[v].red != ScaleQuantumToMap(GetPixelRed(p+offset)))
v++;
cooccurrence[u][v].direction[i].red++;
cooccurrence[v][u].direction[i].red++;
u=0;
v=0;
while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(p)))
u++;
while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(p+offset)))
v++;
cooccurrence[u][v].direction[i].green++;
cooccurrence[v][u].direction[i].green++;
u=0;
v=0;
while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(p)))
u++;
while (grays[v].blue != ScaleQuantumToMap((p+offset)->blue))
v++;
cooccurrence[u][v].direction[i].blue++;
cooccurrence[v][u].direction[i].blue++;
if (image->colorspace == CMYKColorspace)
{
u=0;
v=0;
while (grays[u].index != ScaleQuantumToMap(GetPixelIndex(indexes+x)))
u++;
while (grays[v].index != ScaleQuantumToMap(GetPixelIndex(indexes+x+offset)))
v++;
cooccurrence[u][v].direction[i].index++;
cooccurrence[v][u].direction[i].index++;
}
if (image->matte != MagickFalse)
{
u=0;
v=0;
while (grays[u].opacity != ScaleQuantumToMap(GetPixelOpacity(p)))
u++;
while (grays[v].opacity != ScaleQuantumToMap((p+offset)->opacity))
v++;
cooccurrence[u][v].direction[i].opacity++;
cooccurrence[v][u].direction[i].opacity++;
}
}
p++;
}
}
grays=(LongPixelPacket *) RelinquishMagickMemory(grays);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Normalize spatial dependence matrix.
*/
for (i=0; i < 4; i++)
{
double
normalize;
ssize_t
y;
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
normalize=2.0*image->rows*(image->columns-distance);
break;
}
case 1:
{
/*
Vertical adjacency.
*/
normalize=2.0*(image->rows-distance)*image->columns;
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
}
normalize=PerceptibleReciprocal(normalize);
for (y=0; y < (ssize_t) number_grays; y++)
{
ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
cooccurrence[x][y].direction[i].red*=normalize;
cooccurrence[x][y].direction[i].green*=normalize;
cooccurrence[x][y].direction[i].blue*=normalize;
if (image->colorspace == CMYKColorspace)
cooccurrence[x][y].direction[i].index*=normalize;
if (image->matte != MagickFalse)
cooccurrence[x][y].direction[i].opacity*=normalize;
}
}
}
/*
Compute texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Angular second moment: measure of homogeneity of the image.
*/
channel_features[RedChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].red*
cooccurrence[x][y].direction[i].red;
channel_features[GreenChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].green*
cooccurrence[x][y].direction[i].green;
channel_features[BlueChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].blue*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].index*
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].opacity*
cooccurrence[x][y].direction[i].opacity;
/*
Correlation: measure of linear-dependencies in the image.
*/
sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum[y].direction[i].index+=cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
sum[y].direction[i].opacity+=cooccurrence[x][y].direction[i].opacity;
correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red;
correlation.direction[i].green+=x*y*
cooccurrence[x][y].direction[i].green;
correlation.direction[i].blue+=x*y*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
correlation.direction[i].index+=x*y*
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
correlation.direction[i].opacity+=x*y*
cooccurrence[x][y].direction[i].opacity;
/*
Inverse Difference Moment.
*/
channel_features[RedChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1);
channel_features[GreenChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1);
channel_features[BlueChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].index/((y-x)*(y-x)+1);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].opacity/((y-x)*(y-x)+1);
/*
Sum average.
*/
density_xy[y+x+2].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[y+x+2].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[y+x+2].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[y+x+2].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_xy[y+x+2].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
/*
Entropy.
*/
channel_features[RedChannel].entropy[i]-=
cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
channel_features[GreenChannel].entropy[i]-=
cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
channel_features[BlueChannel].entropy[i]-=
cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].entropy[i]-=
cooccurrence[x][y].direction[i].index*
MagickLog10(cooccurrence[x][y].direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].entropy[i]-=
cooccurrence[x][y].direction[i].opacity*
MagickLog10(cooccurrence[x][y].direction[i].opacity);
/*
Information Measures of Correlation.
*/
density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_x[x].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_x[x].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_y[y].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_y[y].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
}
mean.direction[i].red+=y*sum[y].direction[i].red;
sum_squares.direction[i].red+=y*y*sum[y].direction[i].red;
mean.direction[i].green+=y*sum[y].direction[i].green;
sum_squares.direction[i].green+=y*y*sum[y].direction[i].green;
mean.direction[i].blue+=y*sum[y].direction[i].blue;
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
{
mean.direction[i].index+=y*sum[y].direction[i].index;
sum_squares.direction[i].index+=y*y*sum[y].direction[i].index;
}
if (image->matte != MagickFalse)
{
mean.direction[i].opacity+=y*sum[y].direction[i].opacity;
sum_squares.direction[i].opacity+=y*y*sum[y].direction[i].opacity;
}
}
/*
Correlation: measure of linear-dependencies in the image.
*/
channel_features[RedChannel].correlation[i]=
(correlation.direction[i].red-mean.direction[i].red*
mean.direction[i].red)/(sqrt(sum_squares.direction[i].red-
(mean.direction[i].red*mean.direction[i].red))*sqrt(
sum_squares.direction[i].red-(mean.direction[i].red*
mean.direction[i].red)));
channel_features[GreenChannel].correlation[i]=
(correlation.direction[i].green-mean.direction[i].green*
mean.direction[i].green)/(sqrt(sum_squares.direction[i].green-
(mean.direction[i].green*mean.direction[i].green))*sqrt(
sum_squares.direction[i].green-(mean.direction[i].green*
mean.direction[i].green)));
channel_features[BlueChannel].correlation[i]=
(correlation.direction[i].blue-mean.direction[i].blue*
mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue-
(mean.direction[i].blue*mean.direction[i].blue))*sqrt(
sum_squares.direction[i].blue-(mean.direction[i].blue*
mean.direction[i].blue)));
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].correlation[i]=
(correlation.direction[i].index-mean.direction[i].index*
mean.direction[i].index)/(sqrt(sum_squares.direction[i].index-
(mean.direction[i].index*mean.direction[i].index))*sqrt(
sum_squares.direction[i].index-(mean.direction[i].index*
mean.direction[i].index)));
if (image->matte != MagickFalse)
channel_features[OpacityChannel].correlation[i]=
(correlation.direction[i].opacity-mean.direction[i].opacity*
mean.direction[i].opacity)/(sqrt(sum_squares.direction[i].opacity-
(mean.direction[i].opacity*mean.direction[i].opacity))*sqrt(
sum_squares.direction[i].opacity-(mean.direction[i].opacity*
mean.direction[i].opacity)));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
x;
for (x=2; x < (ssize_t) (2*number_grays); x++)
{
/*
Sum average.
*/
channel_features[RedChannel].sum_average[i]+=
x*density_xy[x].direction[i].red;
channel_features[GreenChannel].sum_average[i]+=
x*density_xy[x].direction[i].green;
channel_features[BlueChannel].sum_average[i]+=
x*density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_average[i]+=
x*density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_average[i]+=
x*density_xy[x].direction[i].opacity;
/*
Sum entropy.
*/
channel_features[RedChannel].sum_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenChannel].sum_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BlueChannel].sum_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_entropy[i]-=
density_xy[x].direction[i].index*
MagickLog10(density_xy[x].direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_entropy[i]-=
density_xy[x].direction[i].opacity*
MagickLog10(density_xy[x].direction[i].opacity);
/*
Sum variance.
*/
channel_features[RedChannel].sum_variance[i]+=
(x-channel_features[RedChannel].sum_entropy[i])*
(x-channel_features[RedChannel].sum_entropy[i])*
density_xy[x].direction[i].red;
channel_features[GreenChannel].sum_variance[i]+=
(x-channel_features[GreenChannel].sum_entropy[i])*
(x-channel_features[GreenChannel].sum_entropy[i])*
density_xy[x].direction[i].green;
channel_features[BlueChannel].sum_variance[i]+=
(x-channel_features[BlueChannel].sum_entropy[i])*
(x-channel_features[BlueChannel].sum_entropy[i])*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].sum_variance[i]+=
(x-channel_features[IndexChannel].sum_entropy[i])*
(x-channel_features[IndexChannel].sum_entropy[i])*
density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].sum_variance[i]+=
(x-channel_features[OpacityChannel].sum_entropy[i])*
(x-channel_features[OpacityChannel].sum_entropy[i])*
density_xy[x].direction[i].opacity;
}
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Sum of Squares: Variance
*/
variance.direction[i].red+=(y-mean.direction[i].red+1)*
(y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red;
variance.direction[i].green+=(y-mean.direction[i].green+1)*
(y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green;
variance.direction[i].blue+=(y-mean.direction[i].blue+1)*
(y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].index+=(y-mean.direction[i].index+1)*
(y-mean.direction[i].index+1)*cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
variance.direction[i].opacity+=(y-mean.direction[i].opacity+1)*
(y-mean.direction[i].opacity+1)*
cooccurrence[x][y].direction[i].opacity;
/*
Sum average / Difference Variance.
*/
density_xy[MagickAbsoluteValue(y-x)].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[MagickAbsoluteValue(y-x)].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[MagickAbsoluteValue(y-x)].direction[i].index+=
cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
density_xy[MagickAbsoluteValue(y-x)].direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
/*
Information Measures of Correlation.
*/
entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
entropy_xy.direction[i].index-=cooccurrence[x][y].direction[i].index*
MagickLog10(cooccurrence[x][y].direction[i].index);
if (image->matte != MagickFalse)
entropy_xy.direction[i].opacity-=
cooccurrence[x][y].direction[i].opacity*MagickLog10(
cooccurrence[x][y].direction[i].opacity);
entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red*
MagickLog10(density_x[x].direction[i].red*
density_y[y].direction[i].red));
entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green*
MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue*
density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy1.direction[i].index-=(
cooccurrence[x][y].direction[i].index*MagickLog10(
density_x[x].direction[i].index*density_y[y].direction[i].index));
if (image->matte != MagickFalse)
entropy_xy1.direction[i].opacity-=(
cooccurrence[x][y].direction[i].opacity*MagickLog10(
density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity));
entropy_xy2.direction[i].red-=(density_x[x].direction[i].red*
density_y[y].direction[i].red*MagickLog10(
density_x[x].direction[i].red*density_y[y].direction[i].red));
entropy_xy2.direction[i].green-=(density_x[x].direction[i].green*
density_y[y].direction[i].green*MagickLog10(
density_x[x].direction[i].green*density_y[y].direction[i].green));
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue*
density_y[y].direction[i].blue*MagickLog10(
density_x[x].direction[i].blue*density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy2.direction[i].index-=(density_x[x].direction[i].index*
density_y[y].direction[i].index*MagickLog10(
density_x[x].direction[i].index*density_y[y].direction[i].index));
if (image->matte != MagickFalse)
entropy_xy2.direction[i].opacity-=(density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity*MagickLog10(
density_x[x].direction[i].opacity*
density_y[y].direction[i].opacity));
}
}
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].red;
channel_features[GreenChannel].variance_sum_of_squares[i]=
variance.direction[i].green;
channel_features[BlueChannel].variance_sum_of_squares[i]=
variance.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].index;
if (image->matte != MagickFalse)
channel_features[RedChannel].variance_sum_of_squares[i]=
variance.direction[i].opacity;
}
/*
Compute more texture features.
*/
(void) memset(&variance,0,sizeof(variance));
(void) memset(&sum_squares,0,sizeof(sum_squares));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Difference variance.
*/
variance.direction[i].red+=density_xy[x].direction[i].red;
variance.direction[i].green+=density_xy[x].direction[i].green;
variance.direction[i].blue+=density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].index+=density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
variance.direction[i].opacity+=density_xy[x].direction[i].opacity;
sum_squares.direction[i].red+=density_xy[x].direction[i].red*
density_xy[x].direction[i].red;
sum_squares.direction[i].green+=density_xy[x].direction[i].green*
density_xy[x].direction[i].green;
sum_squares.direction[i].blue+=density_xy[x].direction[i].blue*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum_squares.direction[i].index+=density_xy[x].direction[i].index*
density_xy[x].direction[i].index;
if (image->matte != MagickFalse)
sum_squares.direction[i].opacity+=density_xy[x].direction[i].opacity*
density_xy[x].direction[i].opacity;
/*
Difference entropy.
*/
channel_features[RedChannel].difference_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenChannel].difference_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BlueChannel].difference_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].difference_entropy[i]-=
density_xy[x].direction[i].index*
MagickLog10(density_xy[x].direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].difference_entropy[i]-=
density_xy[x].direction[i].opacity*
MagickLog10(density_xy[x].direction[i].opacity);
/*
Information Measures of Correlation.
*/
entropy_x.direction[i].red-=(density_x[x].direction[i].red*
MagickLog10(density_x[x].direction[i].red));
entropy_x.direction[i].green-=(density_x[x].direction[i].green*
MagickLog10(density_x[x].direction[i].green));
entropy_x.direction[i].blue-=(density_x[x].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_x.direction[i].index-=(density_x[x].direction[i].index*
MagickLog10(density_x[x].direction[i].index));
if (image->matte != MagickFalse)
entropy_x.direction[i].opacity-=(density_x[x].direction[i].opacity*
MagickLog10(density_x[x].direction[i].opacity));
entropy_y.direction[i].red-=(density_y[x].direction[i].red*
MagickLog10(density_y[x].direction[i].red));
entropy_y.direction[i].green-=(density_y[x].direction[i].green*
MagickLog10(density_y[x].direction[i].green));
entropy_y.direction[i].blue-=(density_y[x].direction[i].blue*
MagickLog10(density_y[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_y.direction[i].index-=(density_y[x].direction[i].index*
MagickLog10(density_y[x].direction[i].index));
if (image->matte != MagickFalse)
entropy_y.direction[i].opacity-=(density_y[x].direction[i].opacity*
MagickLog10(density_y[x].direction[i].opacity));
}
/*
Difference variance.
*/
channel_features[RedChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].red)-
(variance.direction[i].red*variance.direction[i].red))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[GreenChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].green)-
(variance.direction[i].green*variance.direction[i].green))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[BlueChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].blue)-
(variance.direction[i].blue*variance.direction[i].blue))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].opacity)-
(variance.direction[i].opacity*variance.direction[i].opacity))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].index)-
(variance.direction[i].index*variance.direction[i].index))/
((double) number_grays*number_grays*number_grays*number_grays);
/*
Information Measures of Correlation.
*/
channel_features[RedChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/
(entropy_x.direction[i].red > entropy_y.direction[i].red ?
entropy_x.direction[i].red : entropy_y.direction[i].red);
channel_features[GreenChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/
(entropy_x.direction[i].green > entropy_y.direction[i].green ?
entropy_x.direction[i].green : entropy_y.direction[i].green);
channel_features[BlueChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/
(entropy_x.direction[i].blue > entropy_y.direction[i].blue ?
entropy_x.direction[i].blue : entropy_y.direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].index-entropy_xy1.direction[i].index)/
(entropy_x.direction[i].index > entropy_y.direction[i].index ?
entropy_x.direction[i].index : entropy_y.direction[i].index);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].opacity-entropy_xy1.direction[i].opacity)/
(entropy_x.direction[i].opacity > entropy_y.direction[i].opacity ?
entropy_x.direction[i].opacity : entropy_y.direction[i].opacity);
channel_features[RedChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].red-
entropy_xy.direction[i].red)))));
channel_features[GreenChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].green-
entropy_xy.direction[i].green)))));
channel_features[BlueChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].blue-
entropy_xy.direction[i].blue)))));
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].index-
entropy_xy.direction[i].index)))));
if (image->matte != MagickFalse)
channel_features[OpacityChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].opacity-
entropy_xy.direction[i].opacity)))));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
z;
for (z=0; z < (ssize_t) number_grays; z++)
{
ssize_t
y;
ChannelStatistics
pixel;
(void) memset(&pixel,0,sizeof(pixel));
for (y=0; y < (ssize_t) number_grays; y++)
{
ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Contrast: amount of local variations present in an image.
*/
if (((y-x) == z) || ((x-y) == z))
{
pixel.direction[i].red+=cooccurrence[x][y].direction[i].red;
pixel.direction[i].green+=cooccurrence[x][y].direction[i].green;
pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
pixel.direction[i].index+=cooccurrence[x][y].direction[i].index;
if (image->matte != MagickFalse)
pixel.direction[i].opacity+=
cooccurrence[x][y].direction[i].opacity;
}
/*
Maximum Correlation Coefficient.
*/
if ((fabs(density_x[z].direction[i].red) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].red) > MagickEpsilon))
Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red*
cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/
density_y[x].direction[i].red;
if ((fabs(density_x[z].direction[i].green) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].red) > MagickEpsilon))
Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green*
cooccurrence[y][x].direction[i].green/
density_x[z].direction[i].green/density_y[x].direction[i].red;
if ((fabs(density_x[z].direction[i].blue) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].blue) > MagickEpsilon))
Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue*
cooccurrence[y][x].direction[i].blue/
density_x[z].direction[i].blue/density_y[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
if ((fabs(density_x[z].direction[i].index) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].index) > MagickEpsilon))
Q[z][y].direction[i].index+=cooccurrence[z][x].direction[i].index*
cooccurrence[y][x].direction[i].index/
density_x[z].direction[i].index/density_y[x].direction[i].index;
if (image->matte != MagickFalse)
if ((fabs(density_x[z].direction[i].opacity) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].opacity) > MagickEpsilon))
Q[z][y].direction[i].opacity+=
cooccurrence[z][x].direction[i].opacity*
cooccurrence[y][x].direction[i].opacity/
density_x[z].direction[i].opacity/
density_y[x].direction[i].opacity;
}
}
channel_features[RedChannel].contrast[i]+=z*z*pixel.direction[i].red;
channel_features[GreenChannel].contrast[i]+=z*z*pixel.direction[i].green;
channel_features[BlueChannel].contrast[i]+=z*z*pixel.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackChannel].contrast[i]+=z*z*
pixel.direction[i].index;
if (image->matte != MagickFalse)
channel_features[OpacityChannel].contrast[i]+=z*z*
pixel.direction[i].opacity;
}
/*
Maximum Correlation Coefficient.
Future: return second largest eigenvalue of Q.
*/
channel_features[RedChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[GreenChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[BlueChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->colorspace == CMYKColorspace)
channel_features[IndexChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->matte != MagickFalse)
channel_features[OpacityChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
}
/*
Relinquish resources.
*/
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
return(channel_features);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H o u g h L i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use HoughLineImage() in conjunction with any binary edge extracted image (we
% recommend Canny) to identify lines in the image. The algorithm accumulates
% counts for every white pixel for every possible orientation (for angles from
% 0 to 179 in 1 degree increments) and distance from the center of the image to
% the corner (in 1 px increments) and stores the counts in an accumulator
% matrix of angle vs distance. The size of the accumulator is 180x(diagonal/2).
% Next it searches this space for peaks in counts and converts the locations
% of the peaks to slope and intercept in the normal x,y input image space. Use
% the slope/intercepts to find the endpoints clipped to the bounds of the
% image. The lines are then drawn. The counts are a measure of the length of
% the lines.
%
% The format of the HoughLineImage method is:
%
% Image *HoughLineImage(const Image *image,const size_t width,
% const size_t height,const size_t threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find line pairs as local maxima in this neighborhood.
%
% o threshold: the line count threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
/*
  RenderHoughLines() rasterizes the MVG "line ..." primitives that
  HoughLineImage() wrote to the temporary file referenced by image_info.
  A blank canvas of columns x rows (scaled by the image resolution) is
  created and DrawImage() renders the primitives onto it.  Returns the
  rendered image, or NULL on failure.
*/
static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define BoundingBox "viewbox"

  DrawInfo
    *draw_info;

  Image
    *image;

  MagickBooleanType
    status;

  /*
    Open image.
  */
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  image->columns=columns;
  image->rows=rows;
  draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
  /* Scale the drawing to the image resolution (identity when unset). */
  draw_info->affine.sx=image->x_resolution == 0.0 ? 1.0 : image->x_resolution/
    DefaultResolution;
  draw_info->affine.sy=image->y_resolution == 0.0 ? 1.0 : image->y_resolution/
    DefaultResolution;
  image->columns=(size_t) (draw_info->affine.sx*image->columns);
  image->rows=(size_t) (draw_info->affine.sy*image->rows);
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  if (SetImageBackgroundColor(image) == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Render drawing: the MVG text becomes draw_info->primitive, either read
    from the file on disk or copied out of the in-memory blob.
  */
  if (GetBlobStreamData(image) == (unsigned char *) NULL)
    draw_info->primitive=FileToString(image->filename,~0UL,exception);
  else
    {
      draw_info->primitive=(char *) AcquireQuantumMemory(1,(size_t)
        GetBlobSize(image)+1);
      if (draw_info->primitive != (char *) NULL)
        {
          (void) memcpy(draw_info->primitive,GetBlobStreamData(image),
            (size_t) GetBlobSize(image));
          /* NUL-terminate: the blob contents are raw bytes, not a C string. */
          draw_info->primitive[GetBlobSize(image)]='\0';
        }
    }
  /* NOTE(review): if primitive is NULL here, DrawImage is presumably a
     no-op on an empty primitive — confirm against DrawImage's contract. */
  (void) DrawImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
  HoughLineImage() identifies lines in a binary edge image (see the method
  comment above).  Implementation outline: (1) build a 180 x (2*hough_height)
  vote accumulator over (angle, distance-from-center); (2) write each local
  maximum above the threshold as an MVG "line" primitive to a unique temp
  file; (3) render that file onto a fresh canvas via RenderHoughLines().
*/
MagickExport Image *HoughLineImage(const Image *image,const size_t width,
  const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag "HoughLine/Image"

  CacheView
    *image_view;

  char
    message[MaxTextExtent],
    path[MaxTextExtent];

  const char
    *artifact;

  double
    hough_height;

  Image
    *lines_image = NULL;

  ImageInfo
    *image_info;

  int
    file;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *accumulator;

  PointInfo
    center;

  ssize_t
    y;

  size_t
    accumulator_height,
    accumulator_width,
    line_count;

  /*
    Create the accumulator: one column per degree (0..179), one row per
    unit of signed distance from the image center (offset by hough_height
    so indices are non-negative).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  accumulator_width=180;
  /* Half-diagonal of the bounding square: the largest possible distance. */
  hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
    image->rows : image->columns))/2.0);
  accumulator_height=(size_t) (2.0*hough_height);
  accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height,
    sizeof(double),exception);
  if (accumulator == (MatrixInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (NullMatrix(accumulator) == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Populate the accumulator: every bright pixel (intensity above mid-range,
    i.e. "white" in the edge image) votes for each of the 180 candidate
    lines through it.
  */
  status=MagickTrue;
  progress=0;
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelIntensity(image,p) > (QuantumRange/2.0))
        {
          ssize_t
            i;

          for (i=0; i < 180; i++)
          {
            double
              count,
              radius;

            /* Signed distance of the line at angle i through (x,y). */
            radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+
              (((double) y-center.y)*sin(DegreesToRadians((double) i)));
            (void) GetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
            count++;
            (void) SetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
          }
        }
      p++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): this loop is serial, so the atomic is a no-op here;
           it mirrors the pattern used in parallelized siblings. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HoughLineImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  /*
    Generate line segments from accumulator, written as MVG primitives to a
    unique temporary file that RenderHoughLines() reads back below.
  */
  file=AcquireUniqueFileResource(path);
  if (file == -1)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  (void) FormatLocaleString(message,MaxTextExtent,
    "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width,
    (double) height,(double) threshold);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MaxTextExtent,"viewbox 0 0 %.20g %.20g\n",
    (double) image->columns,(double) image->rows);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MaxTextExtent,
    "# x1,y1 x2,y2 # count angle distance\n");
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  /* Default vote threshold: a quarter of the longest image dimension. */
  line_count=image->columns > image->rows ? image->columns/4 : image->rows/4;
  if (threshold != 0)
    line_count=threshold;
  for (y=0; y < (ssize_t) accumulator_height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) accumulator_width; x++)
    {
      double
        count;

      (void) GetMatrixElement(accumulator,x,y,&count);
      if (count >= (double) line_count)
        {
          double
            maxima;

          SegmentInfo
            line;

          ssize_t
            v;

          /*
            Is point a local maxima within the width x height neighborhood?
            NOTE(review): x+u / y+v can index outside the accumulator;
            presumably GetMatrixElement rejects out-of-range coordinates and
            leaves count unchanged — confirm against MatrixInfo semantics.
          */
          maxima=count;
          for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
          {
            ssize_t
              u;

            for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
            {
              if ((u != 0) || (v !=0))
                {
                  (void) GetMatrixElement(accumulator,x+u,y+v,&count);
                  if (count > maxima)
                    {
                      maxima=count;
                      break;
                    }
                }
            }
            /* u stopped early => a larger neighbor was found; stop v too. */
            if (u < (ssize_t) (width/2))
              break;
          }
          (void) GetMatrixElement(accumulator,x,y,&count);
          if (maxima > count)
            continue;
          if ((x >= 45) && (x <= 135))
            {
              /*
                Mostly-horizontal line (angle away from vertical):
                y = (r-x cos(t))/sin(t); clip to the left/right image edges.
              */
              line.x1=0.0;
              line.y1=((double) (y-(accumulator_height/2.0))-((line.x1-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
              line.x2=(double) image->columns;
              line.y2=((double) (y-(accumulator_height/2.0))-((line.x2-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
            }
          else
            {
              /*
                Mostly-vertical line:
                x = (r-y cos(t))/sin(t); clip to the top/bottom image edges.
              */
              line.y1=0.0;
              line.x1=((double) (y-(accumulator_height/2.0))-((line.y1-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
              line.y2=(double) image->rows;
              line.x2=((double) (y-(accumulator_height/2.0))-((line.y2-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
            }
          (void) FormatLocaleString(message,MaxTextExtent,
            "line %g,%g %g,%g # %g %g %g\n",line.x1,line.y1,line.x2,line.y2,
            maxima,(double) x,(double) y);
          if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
            status=MagickFalse;
        }
    }
  }
  (void) close(file);
  /*
    Render lines to image canvas, forwarding the drawing-related artifacts
    of the source image as image options.
  */
  image_info=AcquireImageInfo();
  image_info->background_color=image->background_color;
  (void) FormatLocaleString(image_info->filename,MaxTextExtent,"%s",path);
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"background",artifact);
  artifact=GetImageArtifact(image,"fill");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"fill",artifact);
  artifact=GetImageArtifact(image,"stroke");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"stroke",artifact);
  artifact=GetImageArtifact(image,"strokewidth");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"strokewidth",artifact);
  lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception);
  /* Optionally append the raw accumulator as a second image in the list. */
  artifact=GetImageArtifact(image,"hough-lines:accumulator");
  if ((lines_image != (Image *) NULL) &&
      (IsMagickTrue(artifact) != MagickFalse))
    {
      Image
        *accumulator_image;

      accumulator_image=MatrixToImage(accumulator,exception);
      if (accumulator_image != (Image *) NULL)
        AppendImageToList(&lines_image,accumulator_image);
    }
  /*
    Free resources.
  */
  accumulator=DestroyMatrixInfo(accumulator);
  image_info=DestroyImageInfo(image_info);
  (void) RelinquishUniqueFileResource(path);
  return(GetFirstImageInList(lines_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e a n S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MeanShiftImage() delineate arbitrarily shaped clusters in the image. For
% each pixel, it visits all the pixels in the neighborhood specified by
% the window centered at the pixel and excludes those that are outside the
% radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those
% that are within the specified color distance from the current mean, and
% computes a new x,y centroid from those coordinates and a new mean. This new
% x,y centroid is used as the center for a new window. This process iterates
% until it converges and the final mean replaces the (original window
% center) pixel value. It repeats this process for the next pixel, etc.,
% until it processes all pixels in the image. Results are typically better with
% colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr.
%
% The format of the MeanShiftImage method is:
%
% Image *MeanShiftImage(const Image *image,const size_t width,
% const size_t height,const double color_distance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find pixels in this neighborhood.
%
% o color_distance: the color distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MeanShiftImage() — see the method comment above.  For each pixel, iterate
  mean-shift: average the coordinates and colors of neighborhood pixels whose
  color is within color_distance of the current mean, re-center the window on
  the new centroid, and stop on convergence (or after MaxMeanShiftIterations).
  The converged mean color replaces the pixel in the cloned output image.
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations 100
#define MeanShiftImageTag "MeanShift/Image"

  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  mean_image=CloneImage(image,0,0,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&mean_image->exception);
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,progress) \
    magick_number_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;

    const PixelPacket
      *magick_restrict p;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      MagickPixelPacket
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      ssize_t
        i;

      /* Seed the iteration with the pixel's own color and location. */
      GetMagickPixelPacket(image,&mean_pixel);
      SetMagickPixelPacket(image,p,indexes+x,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        MagickPixelPacket
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetMagickPixelPacket(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        /* Scan the elliptical window centered on the current mean. */
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            /* Restrict to the inscribed ellipse of the window. */
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelPacket
                  pixel;

                /* NOTE(review): this overwrites the shared 'status' flag
                   inside the parallel region; a pixel-fetch failure is thus
                   recorded, but a previously recorded failure could be
                   cleared by a later success — confirm intent. */
                status=GetOneCacheViewVirtualPixel(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                /* Squared RGB distance between the mean and this neighbor. */
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.opacity+=pixel.opacity;
                    count++;
                  }
              }
          }
        }
        /* New mean = centroid of the accepted neighbors (safe for count=0). */
        gamma=PerceptibleReciprocal(count);
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.opacity=gamma*sum_pixel.opacity;
        /* Convergence test: squared shift in position plus squared color
           shift rescaled to a 0..255 range. */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      q->red=ClampToQuantum(mean_pixel.red);
      q->green=ClampToQuantum(mean_pixel.green);
      q->blue=ClampToQuantum(mean_pixel.blue);
      q->opacity=ClampToQuantum(mean_pixel.opacity);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MeanShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y (mutating *y to normalize it, as in the GNU libc
 * manual's elapsed-time example).  Returns 1 if the difference is negative,
 * otherwise 0.  After normalization result->tv_usec is non-negative. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  int carry;

  /* Borrow whole seconds into y's microsecond field so the microsecond
   * subtraction below cannot underflow. */
  if (x->tv_usec < y->tv_usec)
  {
    carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_sec += carry;
    y->tv_usec -= 1000000 * carry;
  }
  /* Push any excess microseconds in the difference back into seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_sec -= carry;
    y->tv_usec += 1000000 * carry;
  }

  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* The sign of the difference is now carried entirely by the seconds. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-1, 3D 7-point variable-coefficient stencil.
 * Arguments: Nx Ny Nz (interior grid points per dimension; a halo layer of
 * one point is added on each side) and Nt (time steps).  Runs the tiled
 * CLooG-generated sweep TESTS times and reports the best wall-clock time.
 */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* All four size arguments are mandatory: the original code read
     Nx/Ny/Nz/Nt uninitialized when they were missing. */
  if (argc < 5) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  // allocate the two time planes of the stencil array: A[2][Nz][Ny][Nx]
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // one variable-coefficient array per stencil point: coef[7][Nz][Ny][Nx]
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 16;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize the input plane and coefficients with reproducible
  // pseudo-random data (fixed seed); index 0 is the halo boundary
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // Time-skewed, tiled loop nest generated by PLUTO/CLooG; do not edit by
    // hand.  t5 is the time step; (t6,t7,t8) sweep z/y/x within a tile and
    // the two time planes of A are alternated via t5 % 2.
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-7,8)),ceild(4*t2-Nz-12,16));t3<=min(min(min(floord(4*t2+Ny,16),floord(Nt+Ny-4,16)),floord(2*t1+Ny+1,16)),floord(4*t1-4*t2+Nz+Ny-1,16));t3++) {
            for (t4=max(max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32)),ceild(16*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t2+Nx,32),floord(Nt+Nx-4,32)),floord(2*t1+Nx+1,32)),floord(16*t3+Nx+12,32)),floord(4*t1-4*t2+Nz+Nx-1,32));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),16*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),16*t3+14),32*t4+30),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
                    lbv=max(32*t4,t5+1);
                    ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return;  /* sign of the last timing difference is not used */

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (including the top-level pointers and the tile
  // list, which the original version leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  return 0;
}
|
GB_unop__round_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__round_fp32_fp32)
// op(A') function: GB (_unop_tran__round_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = roundf (aij)
// A matrix entry type
#define GB_ATYPE \
float

// C matrix entry type
#define GB_CTYPE \
float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]

// Cx [p]: the pth entry of C
#define GB_CX(p) Cx [p]

// unary operator: z = round (x)
#define GB_OP(z, x) \
z = roundf (x) ;

// casting (identity for fp32 -> fp32)
#define GB_CAST(z, aij) \
float z = aij ;

// cij = op (aij), as used by the transpose template
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = roundf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ROUND || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__round_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = roundf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = roundf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = round (A'): the heavy lifting is done by the shared transpose
// template, which expands in terms of the GB_* macros defined above
// (GB_GETA / GB_CAST / GB_OP / GB_CAST_OP).
GrB_Info GB (_unop_tran__round_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    // NOTE(review): workspace/slice layout is dictated by
    // GB_unop_transpose.c — confirm against that template.
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
miss_count.c | /*
* Miss Count micro benchmark
*
*
*
*
*
* A and B are both square matrix. They are statically allocated and
* initialized with constant number, so we can focus on the parallelism.
*
* usage: mpirun -np <N> mm_mpi [-t]
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>
#ifdef USE_CALI
#include <caliper/cali.h>
#endif
#ifndef ORDER
#define ORDER 1000 // the order of the matrix
#endif
#define AVAL 3.0 // initial value of A
#define BVAL 5.0 // initial value of B
#define TOL 0.001 // tolerance used to check the result
#define TYPE double
#define TRUE 1
#define FALSE 0
// selectable cache level to exercise (only the L1 test is visible here)
#define CACHE_L1 1
#define CACHE_L2 2
#define CACHE_L3 3

// parsed command-line options
struct Inputs {
    char cache_level;  // which cache level to test (CACHE_L1/L2/L3)
    int threads;       // number of OpenMP threads to run
};

// forward declarations (definitions appear below / elsewhere in this file)
void test_L1();
void get_input(int argc, char **argv, struct Inputs* input);
void matrix_init(TYPE** A, TYPE** B, TYPE** C, size_t row_len);
void transpose_mat(TYPE* A, int order);
void matrix_free(TYPE* A, TYPE* B, TYPE* C, size_t size);
void print_mat(TYPE* C);
// main function
// Entry point: parse options, spin up the requested number of OpenMP
// threads, run the cache benchmark on every thread, and report wall time.
// (The unused locals i, j, k, r and run_time from the original have been
// removed.)
int main(int argc, char **argv) {
    double start, end;
    struct Inputs input;

    get_input(argc, argv, &input);
    omp_set_num_threads(input.threads);

    start = omp_get_wtime();
#ifdef USE_CALI
    // Tag each OpenMP thread with its id so Caliper can attribute samples.
    cali_id_t thread_attr = cali_create_attribute(
        "thread_id", CALI_TYPE_INT, CALI_ATTR_ASVALUE | CALI_ATTR_SKIP_EVENTS);
#pragma omp parallel
    {
        cali_set_int(thread_attr, omp_get_thread_num());
    }
#endif

    // Every thread runs the benchmark kernel independently on its own
    // private matrices.
#pragma omp parallel
    {
        test_L1();
    }
    end = omp_get_wtime();

    printf("Run time: %f\n", end - start);
    return 0;
}
// the square matrices are ORDERxORDER
// For Skyake the L1 cache is 32k = 4000 doubles
// - so I'm assuming it is divisible by the cache line
// - 64 bytes = 8 doubles
// 64*64 = 4096 doubles
// L1-cache micro-benchmark kernel.
// The matrices are order x order doubles. For Skylake the L1 data cache is
// 32 KB = 4096 doubles and a 64-byte cache line holds 8 doubles, so
// order = 64 (64*64 = 4096 doubles) makes one matrix exactly fill L1.
// Each of the 100 experiments first touches all three matrices
// ("cache_prep"), then performs read-modify-writes over A using a scrambled
// per-line visit order ("cache_test").
void test_L1() {
    const int order = 64;   // one 64x64 double matrix == 32 KB == L1 size
    // Scrambled order in which the 8 cache lines of each visited row are
    // touched, to defeat simple next-line prefetching.
    int cache_line_order[] = {6, 1, 5, 2, 0, 7, 3, 4};
    TYPE *A, *B, *C;

    matrix_init(&A, &B, &C, order);

    for (int exp_count = 0; exp_count < 100; exp_count++) {
#ifdef USE_CALI
        CALI_MARK_BEGIN("cache_prep");
#endif
        // Touch every element of all three matrices before the timed
        // section, so the access pattern below starts from a known cache
        // state.
        transpose_mat(A, order);
        transpose_mat(B, order);
        transpose_mat(C, order);
#ifdef USE_CALI
        CALI_MARK_END("cache_prep");
#endif

#ifdef USE_CALI
        CALI_MARK_BEGIN("cache_test");
#endif
        for (int k = 0; k < 8; k++) {             // element within a cache line
            for (int i = 0; i < order; i += 8) {  // row (every 8th row)
                for (int j = 0; j < 8; j++) {     // cache line within the row
                    int index_1 = i * order + cache_line_order[j] * 8 + k;
                    // index_2 always reads element 3 of the same line, so
                    // each update hits exactly one cache line.
                    int index_2 = i * order + cache_line_order[j] * 8 + 3;
                    A[index_1] = A[index_1] + A[index_2];
                }
            }
        }
#ifdef USE_CALI
        CALI_MARK_END("cache_test");
#endif
    }

    matrix_free(A, B, C, order);
}
/*************************************************************\
Utility Functions
\*************************************************************/
// Parse command-line options into *input.
// Recognized flags: -1/--cache_l1, -2/--cache_l2, -3/--cache_l3 select the
// cache level; -t/--threads <N> sets the OpenMP thread count.
// Defaults: L1 cache, 4 threads. Exits with an error if -t has no argument.
void get_input(int argc, char **argv, struct Inputs* input) {
    int i;
    input->cache_level = CACHE_L1;
    input->threads = 4;
    for (i = 1; i < argc; i++) {
        if ( !(strcmp("-1", argv[i])) || !(strcmp("--cache_l1", argv[i])) )
            input->cache_level = CACHE_L1;
        else if ( !(strcmp("-2", argv[i])) || !(strcmp("--cache_l2", argv[i])) )
            input->cache_level = CACHE_L2;
        else if ( !(strcmp("-3", argv[i])) || !(strcmp("--cache_l3", argv[i])) )
            input->cache_level = CACHE_L3;
        if ( !(strcmp("-t", argv[i])) || !(strcmp("--threads", argv[i])) ) {
            // BUG FIX: the original tested `i++ < argc`, which is always true
            // inside the loop (the loop condition guarantees i < argc) and
            // then read argv[i] after the increment — dereferencing
            // argv[argc] (NULL) when -t was the last argument. Check that a
            // following argument actually exists before consuming it.
            if (i + 1 < argc) {
                input->threads = atoi(argv[++i]);
            } else {
                printf("Please include a thread count with that option\n");
                exit(1);
            }
        }
    }
}
// Initialize the matrices (uniform values to make an easier check)
// Allocate three row_len x row_len matrices on 64-byte (cache-line)
// boundaries and fill them with uniform values: A = AVAL, B = BVAL, C = 0.
// Exits on allocation failure or if the size cannot satisfy aligned_alloc.
void matrix_init(TYPE** A, TYPE** B, TYPE** C, size_t row_len) {
    size_t j;
    size_t bytes = row_len * row_len * sizeof(TYPE);
    // aligned_alloc requires the allocation size to be a multiple of the
    // alignment. FIX: the original checked the element count
    // ((row_len*row_len) % 64), which is stricter than necessary and wrongly
    // rejected valid sizes (e.g. 100x100 doubles = 80000 bytes, a multiple
    // of 64). Check the size in bytes instead.
    if ((bytes % 64) != 0) {
        printf("ERROR aligning memory; make sure size is multiple of 64 bytes.\n");
        exit(1);
    }
    (*A) = (TYPE*)aligned_alloc(64, bytes);
    (*B) = (TYPE*)aligned_alloc(64, bytes);
    (*C) = (TYPE*)aligned_alloc(64, bytes);
    if( ((*A) == NULL) || ((*B) == NULL) || ((*C) == NULL) ) {
        printf("ERROR allocating memory\n");
        exit(1);
    }
    for (j = 0; j < row_len * row_len; j++) {
        (*A)[j] = AVAL;
        (*B)[j] = BVAL;
        (*C)[j] = 0.0;
    }
}
// In-place transpose of a square order x order matrix.
// BUG FIX: the original started the outer loop at i = 1 with the inner loop
// at j = i, which (a) swapped every diagonal element with itself and, more
// importantly, (b) never exchanged row 0 with column 0, so the matrix was
// not actually transposed. (This went unnoticed because the benchmark's
// matrices hold uniform values.) Start at i = 0 and j = i + 1 so each
// off-diagonal pair is swapped exactly once.
void transpose_mat(TYPE* A, int order) {
    TYPE temp;
    for (int i = 0; i < order; i++) {
        for (int j = i + 1; j < order; j++) {
            temp = A[i*order+j];
            A[i*order+j] = A[j*order+i];
            A[j*order+i] = temp;
        }
    }
}
// Release the three matrices allocated by matrix_init. The size parameter
// is not needed by free(); it is kept for interface symmetry with
// matrix_init.
void matrix_free(TYPE* A, TYPE* B, TYPE* C, size_t size) {
    (void)size;  // intentionally unused
    free(C);
    free(B);
    free(A);
}
// Print the full ORDER x ORDER matrix C, one row per line, with rows
// separated by a blank line. (The original also declared error accumulators
// e/ee and an expected value v = AVAL*BVAL*ORDER that were never used —
// leftovers from a result-checking routine — removed here.)
void print_mat(TYPE* C) {
    size_t i, j;
    for (i = 0; i < ORDER; i++) {
        for (j = 0; j < ORDER; j++) {
            printf("%f ", C[i*ORDER+j]);
        }
        printf("\n\n");
    }
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
unsigned getDepth() const { return Depth; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
  /// Tracker for '<' tokens that might have been intended to be treated as an
  /// angle bracket instead of a less-than comparison.
  ///
  /// This happens when the user intends to form a template-id, but typoes the
  /// template-name or forgets a 'template' keyword for a dependent template
  /// name.
  ///
  /// We track these locations from the point where we see a '<' with a
  /// name-like expression on its left until we see a '>' or '>>' that might
  /// match it.
  struct AngleBracketTracker {
    /// Flags used to rank candidate template names when there is more than one
    /// '<' in a scope.
    enum Priority : unsigned short {
      /// A non-dependent name that is a potential typo for a template name.
      PotentialTypo = 0x0,
      /// A dependent name that might instantiate to a template-name.
      DependentName = 0x2,

      /// A space appears before the '<' token.
      SpaceBeforeLess = 0x0,
      /// No space before the '<' token
      NoSpaceBeforeLess = 0x1,

      LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
    };

    /// One candidate '<' location, together with the bracket-nesting state at
    /// the time it was seen (used to tell whether it is still "active").
    struct Loc {
      Expr *TemplateName;
      SourceLocation LessLoc;
      AngleBracketTracker::Priority Priority;
      unsigned short ParenCount, BracketCount, BraceCount;

      /// True if the parser is at the same paren/bracket/brace nesting as
      /// when this '<' was recorded.
      bool isActive(Parser &P) const {
        return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
               P.BraceCount == BraceCount;
      }

      /// True if active, or if the parser has since entered a more deeply
      /// nested bracket scope.
      bool isActiveOrNested(Parser &P) const {
        return isActive(P) || P.ParenCount > ParenCount ||
               P.BracketCount > BracketCount || P.BraceCount > BraceCount;
      }
    };

    SmallVector<Loc, 8> Locs;

    /// Add an expression that might have been intended to be a template name.
    /// In the case of ambiguity, we arbitrarily select the innermost such
    /// expression, for example in 'foo < bar < baz', 'bar' is the current
    /// candidate. No attempt is made to track that 'foo' is also a candidate
    /// for the case where we see a second suspicious '>' token.
    void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
             Priority Prio) {
      if (!Locs.empty() && Locs.back().isActive(P)) {
        // Same scope as the current candidate: keep whichever has the higher
        // (or equal, i.e. more recent) priority.
        if (Locs.back().Priority <= Prio) {
          Locs.back().TemplateName = TemplateName;
          Locs.back().LessLoc = LessLoc;
          Locs.back().Priority = Prio;
        }
      } else {
        Locs.push_back({TemplateName, LessLoc, Prio,
                        P.ParenCount, P.BracketCount, P.BraceCount});
      }
    }

    /// Mark the current potential missing template location as having been
    /// handled (this happens if we pass a "corresponding" '>' or '>>' token
    /// or leave a bracket scope).
    void clear(Parser &P) {
      while (!Locs.empty() && Locs.back().isActiveOrNested(P))
        Locs.pop_back();
    }

    /// Get the current enclosing expression that might have been intended to
    /// be a template name.
    Loc *getCurrent(Parser &P) {
      if (!Locs.empty() && Locs.back().isActive(P))
        return &Locs.back();
      return nullptr;
    }
  };
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
  /// ConsumeToken - Consume the current 'peek token' and lex the next one.
  /// This does not work with special tokens: string literals, code completion,
  /// annotation tokens and balanced tokens must be handled using the specific
  /// consume methods.
  /// Returns the location of the consumed token.
  SourceLocation ConsumeToken() {
    assert(!isTokenSpecial() &&
           "Should consume special tokens with Consume*Token");
    // Record where the consumed token was before advancing, so diagnostics
    // of the form "expected X after Y" can point at Y.
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }
  /// Consume the current token iff it has the expected kind.
  /// \returns true (advancing and recording PrevTokLocation) on a match,
  /// false (leaving the token stream untouched) otherwise.
  bool TryConsumeToken(tok::TokenKind Expected) {
    if (Tok.isNot(Expected))
      return false;
    assert(!isTokenSpecial() &&
           "Should consume special tokens with Consume*Token");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return true;
  }
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
  /// ConsumeAnyToken - Dispatch to the right Consume* method based on the
  /// current token type. This should only be used in cases where the type of
  /// the token really isn't known, e.g. in error recovery.
  ///
  /// If \p ConsumeCodeCompletionTok is true, a code-completion token is
  /// consumed directly; otherwise it is routed through
  /// handleUnexpectedCodeCompletionToken().
  SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
    if (isTokenParen())
      return ConsumeParen();
    if (isTokenBracket())
      return ConsumeBracket();
    if (isTokenBrace())
      return ConsumeBrace();
    if (isTokenStringLiteral())
      return ConsumeStringToken();
    if (Tok.is(tok::code_completion))
      return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                      : handleUnexpectedCodeCompletionToken();
    if (Tok.isAnnotation())
      return ConsumeAnnotationToken();
    return ConsumeToken();
  }
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
  /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
  /// to the given nullability kind. Forwards to Sema, which owns the cached
  /// identifiers.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
    return Actions.getNullabilityKeyword(nullability);
  }
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
  /// isTokenStringLiteral - True if this token is a string-literal
  /// (including wide/UTF variants, per tok::isStringLiteral).
  bool isTokenStringLiteral() const {
    return tok::isStringLiteral(Tok.getKind());
  }
  /// isTokenSpecial - True if this token requires special consumption methods
  /// (i.e. it must not go through plain ConsumeToken): string literals,
  /// balanced delimiters, code-completion, and annotation tokens.
  bool isTokenSpecial() const {
    return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
           isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
  }
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
  /// Return the current token to the token stream and make the given
  /// token the current token.
  void UnconsumeToken(Token &Consumed) {
    Token Next = Tok;
    // Push Consumed back into the preprocessor's stream, re-lex it so it
    // becomes the current token again, then queue the saved lookahead (Next)
    // behind it. Order matters here.
    PP.EnterToken(Consumed);
    PP.Lex(Tok);
    PP.EnterToken(Next);
  }
  /// Consume the current annotation token and lex the next token.
  /// \returns the start location of the annotation. PrevTokLocation is set
  /// to the annotation's *end* location so "after previous token"
  /// diagnostics point past the whole annotated range.
  SourceLocation ConsumeAnnotationToken() {
    assert(Tok.isAnnotation() && "wrong consume method");
    SourceLocation Loc = Tok.getLocation();
    PrevTokLocation = Tok.getAnnotationEndLoc();
    PP.Lex(Tok);
    return Loc;
  }
  /// ConsumeParen - This consume method keeps the paren count up-to-date.
  ///
  SourceLocation ConsumeParen() {
    assert(isTokenParen() && "wrong consume method");
    if (Tok.getKind() == tok::l_paren)
      ++ParenCount;
    else if (ParenCount) {
      // Closing a paren scope invalidates any potential-template-name '<'
      // candidates recorded inside it.
      AngleBrackets.clear(*this);
      --ParenCount;       // Don't let unbalanced )'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }
  /// ConsumeBracket - This consume method keeps the bracket count up-to-date.
  ///
  SourceLocation ConsumeBracket() {
    assert(isTokenBracket() && "wrong consume method");
    if (Tok.getKind() == tok::l_square)
      ++BracketCount;
    else if (BracketCount) {
      // Closing a bracket scope invalidates any potential-template-name '<'
      // candidates recorded inside it.
      AngleBrackets.clear(*this);
      --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }
  /// ConsumeBrace - This consume method keeps the brace count up-to-date.
  ///
  SourceLocation ConsumeBrace() {
    assert(isTokenBrace() && "wrong consume method");
    if (Tok.getKind() == tok::l_brace)
      ++BraceCount;
    else if (BraceCount) {
      // Closing a brace scope invalidates any potential-template-name '<'
      // candidates recorded inside it.
      AngleBrackets.clear(*this);
      --BraceCount;       // Don't let unbalanced }'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }
  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the token kind. This method is specific to strings, as it
  /// handles string literal concatenation, as per C99 5.1.1.2, translation
  /// phase #6.
  /// \returns the location of the consumed string token.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }
  /// Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  /// \returns the location of the consumed code-completion token.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }
  /// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
  /// GetLookAheadToken - This peeks ahead N tokens and returns that token
  /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
  /// returns the token after Tok, etc.
  ///
  /// Note that this differs from the Preprocessor's LookAhead method, because
  /// the Parser always has one token lexed that the preprocessor doesn't.
  ///
  const Token &GetLookAheadToken(unsigned N) {
    // At EOF there is nothing beyond the current token to peek at.
    if (N == 0 || Tok.is(tok::eof)) return Tok;
    // The parser already holds one token, so the preprocessor's index is
    // shifted by one.
    return PP.LookAhead(N-1);
  }
public:
  /// NextToken - This peeks ahead one token and returns it without
  /// consuming it.
  const Token &NextToken() {
    return PP.LookAhead(0);
  }
  /// getTypeAnnotation - Read a parsed type out of an annotation token.
  static ParsedType getTypeAnnotation(const Token &Tok) {
    return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
  }
private:
  /// Store a parsed type into an annotation token (inverse of
  /// getTypeAnnotation).
  static void setTypeAnnotation(Token &Tok, ParsedType T) {
    Tok.setAnnotationValue(T.getAsOpaquePtr());
  }
  /// Read an already-translated primary expression out of an annotation
  /// token.
  static ExprResult getExprAnnotation(const Token &Tok) {
    return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
  }
  /// Set the primary expression corresponding to the given annotation
  /// token (inverse of getExprAnnotation).
  static void setExprAnnotation(Token &Tok, ExprResult ER) {
    Tok.setAnnotationValue(ER.getAsOpaquePointer());
  }
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind
TryAnnotateName(bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
  /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
  /// replacing them with the non-context-sensitive keywords. This returns
  /// true if the token was replaced.
  bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                       const char *&PrevSpec, unsigned &DiagID,
                       bool &isInvalid) {
    // Fast path: nothing to do unless AltiVec/ZVector is enabled and the
    // token is one of the cached contextual identifiers ("vector", "bool",
    // or - AltiVec only - "pixel").
    if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
      return false;

    if (Tok.getIdentifierInfo() != Ident_vector &&
        Tok.getIdentifierInfo() != Ident_bool &&
        (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
      return false;

    return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
  }
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
  /// Returns true if the current token is the identifier 'instancetype'.
  ///
  /// Should only be used in Objective-C language modes.
  bool isObjCInstancetype() {
    assert(getLangOpts().ObjC);
    if (Tok.isAnnotation())
      return false;
    // Lazily populate the cached identifier on first use.
    if (!Ident_instancetype)
      Ident_instancetype = PP.getIdentifierInfo("instancetype");
    return Tok.getIdentifierInfo() == Ident_instancetype;
  }
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
  /// TentativeParsingAction - An object that is used as a kind of "tentative
  /// parsing transaction". It gets instantiated to mark the token position and
  /// after the token consumption is done, Commit() or Revert() is called to
  /// either "commit the consumed tokens" or revert to the previously marked
  /// token position. Example:
  ///
  ///   TentativeParsingAction TPA(*this);
  ///   ConsumeToken();
  ///   ....
  ///   TPA.Revert();
  ///
  class TentativeParsingAction {
    Parser &P;
    // Saved parser state at construction time, restored on Revert().
    Token PrevTok;
    size_t PrevTentativelyDeclaredIdentifierCount;
    unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
    bool isActive;  // true until Commit() or Revert() is called

  public:
    explicit TentativeParsingAction(Parser& p) : P(p) {
      PrevTok = P.Tok;
      PrevTentativelyDeclaredIdentifierCount =
          P.TentativelyDeclaredIdentifiers.size();
      PrevParenCount = P.ParenCount;
      PrevBracketCount = P.BracketCount;
      PrevBraceCount = P.BraceCount;
      // Ask the preprocessor to start caching tokens so we can backtrack.
      P.PP.EnableBacktrackAtThisPos();
      isActive = true;
    }
    /// Keep the tokens consumed since construction; drop the cached stream.
    void Commit() {
      assert(isActive && "Parsing action was finished!");
      P.TentativelyDeclaredIdentifiers.resize(
          PrevTentativelyDeclaredIdentifierCount);
      P.PP.CommitBacktrackedTokens();
      isActive = false;
    }
    /// Rewind the token stream and restore all saved parser state.
    void Revert() {
      assert(isActive && "Parsing action was finished!");
      P.PP.Backtrack();
      P.Tok = PrevTok;
      P.TentativelyDeclaredIdentifiers.resize(
          PrevTentativelyDeclaredIdentifierCount);
      P.ParenCount = PrevParenCount;
      P.BracketCount = PrevBracketCount;
      P.BraceCount = PrevBraceCount;
      isActive = false;
    }
    ~TentativeParsingAction() {
      assert(!isActive && "Forgot to call Commit or Revert!");
    }
  };
  /// A TentativeParsingAction that automatically reverts in its destructor.
  /// Useful for disambiguation parses that will always be reverted.
  class RevertingTentativeParsingAction
      : private Parser::TentativeParsingAction {
  public:
    RevertingTentativeParsingAction(Parser &P)
        : Parser::TentativeParsingAction(P) {}
    ~RevertingTentativeParsingAction() { Revert(); }
  };
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
// The Objective-C container we stepped out of; null when the parser was
// not inside a container, in which case ctor/dtor do nothing.
Decl *DC;
// Saves and restores Parser::ParsingInObjCContainer across the switch.
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
// Temporarily leave the container context for this object's lifetime.
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
// Re-enter the container context we left in the constructor.
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // Enter a new scope with the given flags in parser Self, unless scope
  // entry is suppressed or we are about to enter a compound statement
  // (which manages its own scope). In the compound-statement case, the MS
  // mangling number is still advanced.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (BeforeCompoundStmt || !EnteredScope) {
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      // Mark this object inert so Exit()/the destructor do nothing.
      this->Self = nullptr;
    } else {
      Self->EnterScope(ScopeFlags);
    }
  }
  // Exit - Leave the scope now, rather than waiting for destruction.
  // Safe to call more than once.
  void Exit() {
    if (!Self)
      return;
    Self->ExitScope();
    Self = nullptr;
  }
  ~ParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
// Scope whose flags were modified, so the destructor can restore them.
Scope *CurScope;
// Flag set that was in effect before construction.
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
// Defined out of line; presumably a no-op when ManageFlags is false —
// NOTE(review): confirm against the definition in the .cpp file.
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
// Emit a diagnostic at an explicit location / at a token's location.
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Emit a diagnostic at the location of the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
// Allow SkipUntilFlags values to be combined with '|' while keeping the
// result typed as SkipUntilFlags (a plain '|' would yield unsigned).
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  // Delegate to the ArrayRef overload with a one-element array.
  tok::TokenKind Toks[] = {T};
  return SkipUntil(Toks, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind Toks[] = {T1, T2};
  return SkipUntil(Toks, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind Toks[] = {T1, T2, T3};
  return SkipUntil(Toks, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
// Hooks invoked once the top-level class is complete; subclasses override
// the ones whose cached tokens they own. Definitions are out of line.
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
private:
// The parser this node dispatches to.
Parser *Self;
// The nested class whose late-parsed members are forwarded to.
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
// The attribute's argument tokens, cached for parsing after the class.
CachedTokens Toks;
// Name and location of the attribute itself.
IdentifierInfo &AttrName;
SourceLocation AttrNameLoc;
// Declarations this attribute will be applied to once parsed.
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
// Record an additional declaration the attribute applies to.
void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
  /// Whether these attributes are planned to be parsed shortly after
  /// creation. Marked const so the query works through const references
  /// (the original accessor was a pure read but not const-qualified).
  bool parseSoon() const { return ParseSoon; }
private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
// The method (or template) declaration whose body is cached in Toks.
Decl *D;
// The cached body tokens, replayed by ParseLexedMethodDefs.
CachedTokens Toks;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration.
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
// Takes ownership of the cached default-argument tokens (may be null
// for parameters with no default argument).
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
// ExceptionSpecTokens starts null; it is filled in only when an
// exception-specification's parsing is deferred.
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration.
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
// The parser used to replay the cached initializer tokens.
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
// TemplateScope always starts false; it is set later when a template
// scope is associated with the class.
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Return the innermost class currently being parsed; asserts that the
/// class stack is non-empty.
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
/// Pushes a parsing-class record on construction and pops it either
/// explicitly via Pop() or automatically on destruction.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class of the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    // Pop automatically unless the caller already did so explicitly.
    if (!Popped)
      Pop();
  }
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  // Fix: the default constructor previously left LastParameterListWasEmpty
  // uninitialized, so reading it on a default-constructed object was
  // undefined behavior. Initialize it to false, matching the other
  // constructors. (ExternLoc/TemplateLoc are default-constructed.)
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr),
        LastParameterListWasEmpty(false) { }

  // Construct for a template declaration or explicit specialization.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  // Construct for an explicit instantiation, recording the optional
  // 'extern' location and the 'template' keyword location.
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Single-token convenience overload: consume and cache tokens into Toks
/// until T1 is found, by delegating to the two-token form with both
/// targets equal.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
/// ParsedAttributes that also records the source range covering the
/// attribute tokens.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
// Clear both the attribute list and the recorded source range.
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
/// ParsedAttributesView that also records the source range covering the
/// attribute tokens.
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
// Reset the view's list and the recorded source range.
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII bookkeeping for parsing an Objective-C @implementation: registers
/// itself as the parser's current implementation on construction and
/// collects method bodies whose parsing is deferred.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  // Fix: initialize Finished in the member-init list, BEFORE this object is
  // published via P.CurParsedObjCImpl. Previously Finished was assigned in
  // the constructor body after publication, leaving a window in which the
  // object was reachable with an uninitialized member.
  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
// Convenience overload: consult the innermost potential-angle-bracket
// record, if any; with no record there is nothing to diagnose.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  auto *Info = AngleBrackets.getCurrent(*this);
  if (!Info)
    return false;
  return checkPotentialAngleBracketDelimiter(*Info, OpToken);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(
SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> Completer = llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false);
//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
bool *SkippedInits = nullptr);
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
ExprResult ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
  // A '{' introduces a braced initializer-list; anything else must be an
  // assignment-expression.
  return Tok.is(tok::l_brace) ? ParseBraceInitializer()
                              : ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
bool AllowOpenMPStandalone = false);
enum AllowedConstructsKind {
/// Allow any declarations, statements, OpenMP directives.
ACK_Any,
/// Allow only statements and non-standalone OpenMP directives.
ACK_StatementsOpenMPNonStandalone,
/// Allow statements and all executable OpenMP directives
ACK_StatementsOpenMPAnyExecutable
};
StmtResult
ParseStatementOrDeclaration(StmtVector &Stmts, AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement();
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
StmtResult ParseCaseStatement(bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement();
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior that this __if_exists or __if_not_exists block
/// should have.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  // Contexts that accept only a type-specifier(-seq).
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return true;

  // Contexts that accept full declaration specifiers.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  // Contexts where class template argument deduction is not performed.
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;

  // Contexts where deduction is allowed.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc; // location of the ':' in a for-range-declaration
  ExprResult RangeExpr;    // the range expression following the ':'
  // A ':' was seen, so the declaration really was a for-range-declaration.
  bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  // C++ requires tentative-parse disambiguation; C can answer directly.
  return getLangOpts().CPlusPlus
             ? isCXXDeclarationSpecifier() == TPResult::True
             : isDeclarationSpecifier(/*DisambiguatingWithExpression=*/true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  return getLangOpts().CPlusPlus
             ? isCXXDeclarationStatement()
             : isDeclarationSpecifier(/*DisambiguatingWithExpression=*/true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // Notify Sema before disambiguating when OpenMP is enabled; this must
  // happen first, regardless of how the disambiguation turns out.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  return getLangOpts().CPlusPlus
             ? isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true)
             : isDeclarationSpecifier(/*DisambiguatingWithExpression=*/true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // In C there is no type-id/expression ambiguity inside parens; just
    // check for a specifier-qualifier-list.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
bool isTypeIdInParens() {
  // Convenience overload for callers that don't care about ambiguity.
  bool DiscardAmbiguity;
  return isTypeIdInParens(DiscardAmbiguity);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  // The ambiguity flag is only meaningful in C++ and is discarded here.
  bool DiscardAmbiguity;
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdUnambiguous, DiscardAmbiguity);
  return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  // Convenience overload that ignores the ambiguity result.
  bool DiscardAmbiguity;
  return isCXXTypeId(Context, DiscardAmbiguity);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *HasMissingTypename = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Diagnose only when '[[' really starts an attribute-specifier, i.e.
  // standard attributes are enabled and a second '[' follows.
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  // Only diagnose when the upcoming tokens actually look like an
  // attribute: either '[[' or 'alignas'.
  bool LooksLikeAttribute =
      (Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
      Tok.is(tok::kw_alignas);
  if (LooksLikeAttribute)
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  // An invalid range means no attributes were parsed; nothing to diagnose.
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }
}
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  // An invalid range means no attributes were parsed; nothing to diagnose.
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which the standard permits but we don't support yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  // Only consume tokens when a GNU '__attribute__' introducer is present.
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParsedAttributes GNUAttrs(AttrFactory);
  SourceLocation EndLoc;
  ParseGNUAttributes(GNUAttrs, &EndLoc, LateAttrs, &D);
  D.takeAttributes(GNUAttrs, EndLoc);
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  // Only consume tokens when a GNU '__attribute__' introducer is present.
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseCXX11Attributes(Declarator &D) {
  // Bail out unless [[...]] attributes are enabled and one is present.
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange Attrs(AttrFactory);
  SourceLocation EndLoc;
  ParseCXX11Attributes(Attrs, &EndLoc);
  D.takeAttributes(Attrs, EndLoc);
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  // Bail out unless [[...]] attributes are enabled and one is present.
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  // Parse into a ranged list first, then move into the caller's list.
  ParsedAttributesWithRange AttrsWithRange(AttrFactory);
  ParseCXX11Attributes(AttrsWithRange, endLoc);
  attrs.takeAllFrom(AttrsWithRange);
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  // Bail out unless [[...]] attributes are enabled and one is present.
  if (!standardAttributesAllowed())
    return;
  if (isCXX11AttributeSpecifier(/*Disambiguate=*/false,
                                OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  // Microsoft-style '[...]' attributes require the MS extensions option.
  if (!getLangOpts().MicrosoftExt)
    return;
  if (Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  // '__declspec(...)' is parsed only when the DeclSpecKeyword language
  // option is enabled and the keyword is actually next.
  if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  // Outside OpenCL there is nothing to parse; report success.
  if (!getLangOpts().OpenCL)
    return true;
  return ParseOpenCLUnrollHintAttribute(Attrs);
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  // Convenience overload that inspects the current token.
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;   // scope specifier whose declarator scope we may enter
  bool EnteredScope;  // Sema was successfully told we entered the scope
  bool CreatedScope;  // a parser scope was pushed and must be popped
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");
    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.
    // Note the inverted flag: we record EnteredScope only when
    // ActOnCXXEnterDeclaratorScope returns false, so the destructor
    // exits the Sema scope only on the success path.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }
  ~DeclaratorScopeObj() {
    // Exit in reverse order of entry: Sema scope first, parser scope last.
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
/// Classify whether the tokens ahead form a C++11 attribute-specifier;
/// see CXX11AttributeKind for the possible answers.
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
                          bool OuterMightBeMessageSend = false);
/// Diagnose a namespace declaration appearing where one is not allowed.
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
/// Parse a namespace definition; \p InlineLoc is the location of 'inline'
/// for an inline namespace, if any.
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
                              SourceLocation &DeclEnd,
                              SourceLocation InlineLoc = SourceLocation());
/// Locations and name for one level of a (possibly nested) namespace
/// declarator.
struct InnerNamespaceInfo {
  SourceLocation NamespaceLoc;
  SourceLocation InlineLoc;
  SourceLocation IdentLoc;
  IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
/// Parse the inner levels of a nested namespace definition, starting at
/// \p index into \p InnerNSs.
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
                         unsigned int index, SourceLocation &InlineLoc,
                         ParsedAttributes &attrs,
                         BalancedDelimiterTracker &Tracker);
/// Parse a linkage-specification (e.g. 'extern "C" { ... }').
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
/// Parse an export-declaration ('export ...').
Decl *ParseExportDeclaration();
/// Parse either a using-directive or one or more using-declarations /
/// alias-declarations.
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
    DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
    SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
/// Parse a using-directive: 'using namespace X;'.
Decl *ParseUsingDirective(DeclaratorContext Context,
                          SourceLocation UsingLoc,
                          SourceLocation &DeclEnd,
                          ParsedAttributes &attrs);
/// The parsed pieces of a single declarator within a using-declaration:
/// an optional 'typename', a nested-name-specifier, the introduced name,
/// and an optional trailing '...'.
struct UsingDeclarator {
  SourceLocation TypenameLoc; ///< Location of 'typename', if present.
  CXXScopeSpec SS;            ///< The nested-name-specifier, if any.
  UnqualifiedId Name;         ///< The name being introduced.
  SourceLocation EllipsisLoc; ///< Location of '...', if present.
  /// Reset every field so this object can be reused for the next
  /// declarator in the list.
  void clear() {
    TypenameLoc = SourceLocation();
    EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};
/// Parse one declarator of a using-declaration into \p D.
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
/// Parse a using-declaration (or an alias-declaration, which starts the
/// same way) after the 'using' keyword at \p UsingLoc.
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
                                     const ParsedTemplateInfo &TemplateInfo,
                                     SourceLocation UsingLoc,
                                     SourceLocation &DeclEnd,
                                     AccessSpecifier AS = AS_none);
/// Finish parsing an alias-declaration ('using X = T;') once the
/// declarator \p D has been parsed.
Decl *ParseAliasDeclarationAfterDeclarator(
    const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
    UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
    ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
/// Parse a static_assert-declaration.
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
/// Parse a namespace-alias-definition: 'namespace X = Y;'.
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
                          SourceLocation AliasLoc, IdentifierInfo *Alias,
                          SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
/// Determine whether the current token can legitimately follow a
/// type-specifier (\p CouldBeBitfield also allows ':').
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
/// Parse a class/struct/union specifier, including any member
/// specification, and record it in \p DS.
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
                         DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
                         AccessSpecifier AS, bool EnteringContext,
                         DeclSpecContext DSC,
                         ParsedAttributesWithRange &Attributes);
/// Skip over a C++ member-specification without fully parsing it.
/// NOTE(review): when this is used (error recovery vs. late parsing) is
/// not visible in this chunk.
void SkipCXXMemberSpecification(SourceLocation StartLoc,
                                SourceLocation AttrFixitLoc,
                                unsigned TagType,
                                Decl *TagDecl);
/// Parse the member-specification of a class definition.
void ParseCXXMemberSpecification(SourceLocation StartLoc,
                                 SourceLocation AttrFixitLoc,
                                 ParsedAttributesWithRange &Attrs,
                                 unsigned TagType,
                                 Decl *TagDecl);
/// Parse an in-class member initializer; \p EqualLoc receives the
/// location of '=' when that form is used.
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
                                     SourceLocation &EqualLoc);
/// Parse the declarator portion of a member declaration, up to (but not
/// including) any initializer.
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
                                               VirtSpecifiers &VS,
                                               ExprResult &BitfieldSize,
                                               LateParsedAttrList &LateAttrs);
/// Diagnose decl-specifiers that erroneously appear after a C++11
/// virt-specifier-seq ('override'/'final').
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
                                                             VirtSpecifiers &VS);
/// Parse one member declaration of a C++ class.
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
    AccessSpecifier AS, ParsedAttributes &Attr,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
/// Parse a member declaration, also handling access specifiers and
/// pragmas that may appear between members.
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
    AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
    DeclSpec::TST TagType, Decl *Tag);
/// Parse a constructor's ctor-initializer (': member(init), ...').
void ParseConstructorInitializer(Decl *ConstructorDecl);
/// Parse a single mem-initializer within a ctor-initializer.
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
/// Record delayed-parsing work for a member function declaration.
/// NOTE(review): exact bookkeeping is not visible in this chunk.
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
                                    Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
/// Parse the type naming a base class in a base-specifier.
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
                                  SourceLocation &EndLocation);
/// Parse the base-clause of a class definition (': base-specifier, ...').
void ParseBaseClause(Decl *ClassDecl);
/// Parse a single base-specifier.
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
/// Return the access specifier named by the current token, if present.
AccessSpecifier getAccessSpecifierIfPresent() const;
/// Parse a template-id as an unqualified-id, annotating it into \p Id.
/// \p AssumeTemplateId forces template-id interpretation even without a
/// preceding 'template' keyword.
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
                                  SourceLocation TemplateKWLoc,
                                  IdentifierInfo *Name,
                                  SourceLocation NameLoc,
                                  bool EnteringContext,
                                  ParsedType ObjectType,
                                  UnqualifiedId &Id,
                                  bool AssumeTemplateId);
/// Parse an operator-function-id or conversion-function-id as an
/// unqualified-id.
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
                                ParsedType ObjectType,
                                UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
/// NOTE(review): \p Toks presumably holds the cached directive tokens —
/// confirm at the definition.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
                                          CachedTokens &Toks,
                                          SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
                                       SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
    AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
    DeclSpec::TST TagType = DeclSpec::TST_unspecified,
    Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
    OpenMPDirectiveKind Kind,
    const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
        Callback,
    bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param Allowed ACK_Any, if any directives are allowed,
/// ACK_StatementsOpenMPAnyExecutable - if any executable directives are
/// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone
/// executable directives are allowed.
///
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(AllowedConstructsKind Allowed);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
                             OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
                                       bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
                                              bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
                                    OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
  Expr *TailExpr = nullptr;
  SourceLocation ColonLoc;
  SourceLocation RLoc;
  CXXScopeSpec ReductionIdScopeSpec;
  DeclarationNameInfo ReductionId;
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
  OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
  // Map-type-modifiers and their locations, kept in parallel.
  SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
      MapTypeModifiers;
  SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
      MapTypeModifiersLoc;
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
  bool IsMapTypeImplicit = false;
  SourceLocation DepLinMapLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
                        SmallVectorImpl<Expr *> &Vars,
                        OpenMPVarListDataTy &Data);
/// Parse an unqualified-id (identifier, operator name, constructor or
/// destructor name, or template-id) into \p Result.
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
                        bool AllowDestructorName,
                        bool AllowConstructorName,
                        bool AllowDeductionGuide,
                        ParsedType ObjectType,
                        SourceLocation *TemplateKWLoc,
                        UnqualifiedId &Result);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
/// Parse a declaration that begins with 'template' (or 'extern template').
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
                                           SourceLocation &DeclEnd,
                                           ParsedAttributes &AccessAttrs,
                                           AccessSpecifier AS = AS_none);
/// Parse a template declaration or an explicit specialization.
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
                                               SourceLocation &DeclEnd,
                                               ParsedAttributes &AccessAttrs,
                                               AccessSpecifier AS);
/// Parse the single declaration that follows a template parameter list
/// (or 'template<>' for an explicit specialization).
Decl *ParseSingleDeclarationAfterTemplate(
    DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
    ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
    ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
/// Parse '<' template-parameter-list '>' at nesting depth \p Depth,
/// recording the angle-bracket locations.
bool ParseTemplateParameters(unsigned Depth,
                             SmallVectorImpl<NamedDecl *> &TemplateParams,
                             SourceLocation &LAngleLoc,
                             SourceLocation &RAngleLoc);
/// Parse a comma-separated template-parameter-list.
bool ParseTemplateParameterList(unsigned Depth,
                                SmallVectorImpl<NamedDecl*> &TemplateParams);
/// Determine whether the tokens ahead begin a type template parameter.
bool isStartOfTemplateTypeParameter();
/// Parse a single template-parameter at the given depth and position.
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
/// Parse a type-parameter ('class'/'typename' ...).
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
/// Parse a template template-parameter ('template <...> class ...').
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
/// Parse a non-type template-parameter.
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
/// Diagnose a '...' written at \p EllipsisLoc when it belongs at
/// \p CorrectLoc, with a fix-it where appropriate.
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
                               SourceLocation CorrectLoc,
                               bool AlreadyHasEllipsis,
                               bool IdentifierHasName);
/// Diagnose a misplaced '...' within a declarator.
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
                                           Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
/// Consume the '>' closing a template argument list, handling split
/// tokens such as '>>'.
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
                                    bool ConsumeLastToken,
                                    bool ObjCGenericList);
/// Parse '<' template-argument-list '>' after a template-name.
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
                                      SourceLocation &LAngleLoc,
                                      TemplateArgList &TemplateArgs,
                                      SourceLocation &RAngleLoc);
/// Replace the tokens of a template-id with a single annotation token.
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
                             CXXScopeSpec &SS,
                             SourceLocation TemplateKWLoc,
                             UnqualifiedId &TemplateName,
                             bool AllowTypeAnnotation = true);
/// Turn a template-id annotation token into a type annotation token.
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
/// Determine whether the tokens ahead form a template argument list.
bool IsTemplateArgumentList(unsigned Skip = 0);
/// Parse a comma-separated template-argument-list into \p TemplateArgs.
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
/// Parse a template-argument that names a template.
ParsedTemplateArgument ParseTemplateTemplateArgument();
/// Parse a single template-argument (type, expression, or template).
ParsedTemplateArgument ParseTemplateArgument();
/// Parse an explicit instantiation:
/// 'extern'? 'template' declaration.
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
                                 SourceLocation ExternLoc,
                                 SourceLocation TemplateLoc,
                                 SourceLocation &DeclEnd,
                                 ParsedAttributes &AccessAttrs,
                                 AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
/// Parse a module declaration ('module ...;').
DeclGroupPtrTy ParseModuleDecl();
/// Parse a module import; \p AtLoc is the location of '@' for the
/// Objective-C '@import' form, invalid otherwise.
Decl *ParseModuleImport(SourceLocation AtLoc);
/// Recover from a module import/begin/end annotation appearing in an
/// unexpected position.
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
/// Parse a module-name (a '.'-separated sequence of identifiers),
/// appending each identifier and its location to \p Path.
bool ParseModuleName(
    SourceLocation UseLoc,
    SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
    bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
/// Parse a type-trait expression, e.g. '__is_trivially_copyable(T)'.
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
// (Overrides of code-completion callbacks; the base class declaring them
// is outside this chunk.)
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
                               unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
// NOTE(review): removed non-source trailing text (dataset-viewer boilerplate
// appended after '#endif'); it was not valid C++ and did not belong to this
// header.